Diffstat (limited to 'clang')
-rw-r--r--  clang/docs/ClangFormatStyleOptions.rst | 83
-rw-r--r--  clang/docs/InternalsManual.rst | 110
-rw-r--r--  clang/docs/ReleaseNotes.rst | 12
-rw-r--r--  clang/include/clang/Basic/BuiltinsX86.td | 9
-rw-r--r--  clang/include/clang/Basic/DirectoryEntry.h | 10
-rw-r--r--  clang/include/clang/Basic/FileEntry.h | 6
-rw-r--r--  clang/include/clang/Basic/riscv_vector.td | 83
-rw-r--r--  clang/include/clang/Basic/riscv_vector_common.td | 68
-rw-r--r--  clang/include/clang/Format/Format.h | 86
-rw-r--r--  clang/include/clang/Sema/Sema.h | 53
-rw-r--r--  clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h | 17
-rw-r--r--  clang/lib/AST/ASTConcept.cpp | 2
-rw-r--r--  clang/lib/AST/ByteCode/InterpBuiltin.cpp | 9
-rw-r--r--  clang/lib/AST/ByteCode/Program.cpp | 5
-rw-r--r--  clang/lib/AST/ExprConstant.cpp | 8
-rw-r--r--  clang/lib/Analysis/ExprMutationAnalyzer.cpp | 20
-rw-r--r--  clang/lib/CodeGen/CGAtomic.cpp | 153
-rw-r--r--  clang/lib/Format/BreakableToken.cpp | 6
-rw-r--r--  clang/lib/Format/ContinuationIndenter.cpp | 23
-rw-r--r--  clang/lib/Format/DefinitionBlockSeparator.cpp | 2
-rw-r--r--  clang/lib/Format/Format.cpp | 22
-rw-r--r--  clang/lib/Format/FormatToken.cpp | 13
-rw-r--r--  clang/lib/Format/FormatToken.h | 3
-rw-r--r--  clang/lib/Format/TokenAnnotator.cpp | 92
-rw-r--r--  clang/lib/Format/WhitespaceManager.cpp | 124
-rw-r--r--  clang/lib/Headers/avx2intrin.h | 8
-rw-r--r--  clang/lib/Headers/avx512bwintrin.h | 15
-rw-r--r--  clang/lib/Headers/avx512vlbwintrin.h | 8
-rw-r--r--  clang/lib/Headers/tmmintrin.h | 13
-rw-r--r--  clang/lib/Parse/ParseTemplate.cpp | 6
-rw-r--r--  clang/lib/Sema/SemaChecking.cpp | 9
-rw-r--r--  clang/lib/Sema/SemaConcept.cpp | 78
-rw-r--r--  clang/lib/Sema/SemaDecl.cpp | 136
-rw-r--r--  clang/lib/Sema/SemaRISCV.cpp | 3
-rw-r--r--  clang/lib/Sema/SemaTemplateInstantiate.cpp | 93
-rw-r--r--  clang/lib/Sema/SemaTemplateInstantiateDecl.cpp | 36
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp | 2
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp | 2
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp | 8
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/StdVariantChecker.cpp | 4
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/TaggedUnionModeling.h | 2
-rw-r--r--  clang/lib/StaticAnalyzer/Core/CheckerManager.cpp | 11
-rw-r--r--  clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp | 62
-rw-r--r--  clang/lib/Support/RISCVVIntrinsicUtils.cpp | 5
-rw-r--r--  clang/test/AST/ByteCode/extern.cpp | 6
-rw-r--r--  clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1.c | 104
-rw-r--r--  clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1sb.c | 48
-rw-r--r--  clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1sh.c | 32
-rw-r--r--  clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1sw.c | 16
-rw-r--r--  clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1ub.c | 48
-rw-r--r--  clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1uh.c | 32
-rw-r--r--  clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1uw.c | 16
-rw-r--r--  clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1.c | 104
-rw-r--r--  clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1b.c | 24
-rw-r--r--  clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1h.c | 16
-rw-r--r--  clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1w.c | 8
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfadd.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfclass.c | 134
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmacc.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmadd.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmax.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmerge.c | 69
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmin.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsac.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsub.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmul.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmv.c | 189
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt.c | 724
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rod.c | 113
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rtz.c | 267
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmacc.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmadd.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsac.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsub.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrec7.c | 129
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsqrt7.c | 129
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsub.c | 129
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnj.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjn.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjx.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1down.c | 129
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1up.c | 129
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsub.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwadd.c | 899
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwcvt.c | 366
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmacc.c | 486
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmsac.c | 486
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmul.c | 455
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmacc.c | 494
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmsac.c | 494
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwsub.c | 899
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfeq.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfge.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfgt.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfle.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmflt.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfne.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfadd.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfclass.c | 134
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmacc.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmadd.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmax.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmerge.c | 69
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmin.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsac.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsub.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmul.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmv.c | 69
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt.c | 724
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rod.c | 113
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rtz.c | 267
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmacc.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmadd.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsac.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsub.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrec7.c | 129
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsqrt7.c | 129
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsub.c | 129
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnj.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjn.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjx.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1down.c | 129
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1up.c | 129
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsub.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwadd.c | 893
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwcvt.c | 366
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmacc.c | 474
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmsac.c | 474
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmul.c | 451
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmacc.c | 480
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmsac.c | 480
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwsub.c | 893
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfeq.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfge.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfgt.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfle.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmflt.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfne.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfadd.c | 489
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfclass.c | 272
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmacc.c | 489
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmadd.c | 489
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmax.c | 489
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmerge.c | 69
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmin.c | 489
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsac.c | 489
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsub.c | 489
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmul.c | 489
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmv.c | 129
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt.c | 1577
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rod.c | 233
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rtz.c | 572
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmacc.c | 489
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmadd.c | 489
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsac.c | 489
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsub.c | 489
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrec7.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsqrt7.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsub.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnj.c | 489
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjn.c | 489
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjx.c | 489
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1down.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1up.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsub.c | 489
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwadd.c | 2007
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwcvt.c | 765
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmacc.c | 1017
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmsac.c | 1017
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmul.c | 1015
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmacc.c | 1034
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmsac.c | 1034
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwsub.c | 2007
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfeq.c | 129
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfge.c | 129
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfgt.c | 129
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfle.c | 129
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmflt.c | 129
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfne.c | 129
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfadd.c | 489
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfclass.c | 272
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmacc.c | 489
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmadd.c | 489
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmax.c | 489
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmerge.c | 69
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmin.c | 489
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsac.c | 489
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsub.c | 489
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmul.c | 489
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmv.c | 129
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt.c | 1539
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rod.c | 233
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rtz.c | 572
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmacc.c | 489
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmadd.c | 489
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsac.c | 489
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsub.c | 489
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrec7.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsqrt7.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsub.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnj.c | 489
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjn.c | 489
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjx.c | 489
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1down.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1up.c | 249
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsub.c | 489
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwadd.c | 1932
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwcvt.c | 765
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmacc.c | 977
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmsac.c | 977
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmul.c | 975
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmacc.c | 994
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmsac.c | 994
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwsub.c | 1932
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfeq.c | 129
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfge.c | 129
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfgt.c | 129
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfle.c | 129
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmflt.c | 129
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfne.c | 129
-rw-r--r--  clang/test/CodeGen/X86/avx10_2bf16-builtins.c | 8
-rw-r--r--  clang/test/CodeGen/X86/avx2-builtins.c | 1
-rw-r--r--  clang/test/CodeGen/X86/avx512bw-builtins.c | 18
-rw-r--r--  clang/test/CodeGen/X86/avx512f-builtins.c | 52
-rw-r--r--  clang/test/CodeGen/X86/avx512fp16-builtins.c | 6
-rw-r--r--  clang/test/CodeGen/X86/avx512vl-builtins.c | 96
-rw-r--r--  clang/test/CodeGen/X86/avx512vlbw-builtins.c | 28
-rw-r--r--  clang/test/CodeGen/X86/mmx-builtins.c | 1
-rw-r--r--  clang/test/CodeGen/X86/ssse3-builtins.c | 1
-rw-r--r--  clang/test/CodeGen/arm-mve-intrinsics/cplusplus.cpp | 2
-rw-r--r--  clang/test/CodeGen/arm-mve-intrinsics/dup.c | 24
-rw-r--r--  clang/test/CodeGen/arm-mve-intrinsics/load-store.c | 88
-rw-r--r--  clang/test/CodeGen/builtin-masked.c | 18
-rw-r--r--  clang/test/CodeGen/c11atomics-ios.c | 4
-rw-r--r--  clang/test/CodeGenCXX/builtin-atomic-compare_exchange.cpp | 130
-rw-r--r--  clang/test/Frontend/rewrite-includes-bom.c | 6
-rw-r--r--  clang/test/Headers/wasm.c | 18
-rw-r--r--  clang/test/Lexer/minimize_source_to_dependency_directives_utf8bom.c | 2
-rw-r--r--  clang/test/Modules/crash-vfs-relative-incdir.m | 2
-rw-r--r--  clang/test/Modules/crash-vfs-run-reproducer.m | 2
-rw-r--r--  clang/test/Sema/attr-print.c | 3
-rw-r--r--  clang/test/SemaCXX/libstdcxx_pair_swap_hack.cpp | 7
-rw-r--r--  clang/test/SemaCXX/warn-implicit-unicode-conversions.cpp | 8
-rw-r--r--  clang/test/SemaTemplate/concepts.cpp | 43
-rw-r--r--  clang/test/SemaTemplate/instantiate-self.cpp | 28
-rw-r--r--  clang/test/lit.cfg.py | 13
-rw-r--r--  clang/unittests/Analysis/ExprMutationAnalyzerTest.cpp | 38
-rw-r--r--  clang/unittests/Format/AlignBracketsTest.cpp | 13
-rw-r--r--  clang/unittests/Format/ConfigParseTest.cpp | 13
-rw-r--r--  clang/unittests/Format/FormatTest.cpp | 17
-rw-r--r--  clang/unittests/Format/FormatTestCSharp.cpp | 2
-rw-r--r--  clang/unittests/Format/FormatTestComments.cpp | 52
-rw-r--r--  clang/unittests/Format/FormatTestJava.cpp | 2
-rw-r--r--  clang/unittests/Format/FormatTestTextProto.cpp | 2
-rw-r--r--  clang/unittests/Format/FormatTestVerilog.cpp | 2
-rw-r--r--  clang/unittests/Format/TokenAnnotatorTest.cpp | 28
-rwxr-xr-x  clang/www/cxx_status.html | 9
257 files changed, 70750 insertions, 1013 deletions
diff --git a/clang/docs/ClangFormatStyleOptions.rst b/clang/docs/ClangFormatStyleOptions.rst
index b746df5..570cab2 100644
--- a/clang/docs/ClangFormatStyleOptions.rst
+++ b/clang/docs/ClangFormatStyleOptions.rst
@@ -245,7 +245,7 @@ the configuration (without a prefix: ``Auto``).
.. note::
This currently only applies to braced initializer lists (when
- ``Cpp11BracedListStyle`` is ``true``) and parentheses.
+ ``Cpp11BracedListStyle`` is not ``Block``) and parentheses.
@@ -3816,29 +3816,72 @@ the configuration (without a prefix: ``Auto``).
.. _Cpp11BracedListStyle:
-**Cpp11BracedListStyle** (``Boolean``) :versionbadge:`clang-format 3.4` :ref:`¶ <Cpp11BracedListStyle>`
- If ``true``, format braced lists as best suited for C++11 braced
- lists.
+**Cpp11BracedListStyle** (``BracedListStyle``) :versionbadge:`clang-format 3.4` :ref:`¶ <Cpp11BracedListStyle>`
+ The style to handle braced lists.
- Important differences:
+ Possible values:
- * No spaces inside the braced list.
- * No line break before the closing brace.
- * Indentation with the continuation indent, not with the block indent.
+ * ``BLS_Block`` (in configuration: ``Block``)
+ Best suited for pre-C++11 braced lists.
- Fundamentally, C++11 braced lists are formatted exactly like function
- calls would be formatted in their place. If the braced list follows a name
- (e.g. a type or variable name), clang-format formats as if the ``{}`` were
- the parentheses of a function call with that name. If there is no name,
- a zero-length name is assumed.
+ * Spaces inside the braced list.
+ * Line break before the closing brace.
+ * Indentation with the block indent.
+
+
+ .. code-block:: c++
+
+ vector<int> x{ 1, 2, 3, 4 };
+ vector<T> x{ {}, {}, {}, {} };
+ f(MyMap[{ composite, key }]);
+ new int[3]{ 1, 2, 3 };
+ Type name{ // Comment
+ value
+ };
+
+ * ``BLS_FunctionCall`` (in configuration: ``FunctionCall``)
+ Best suited for C++11 braced lists.
+
+ * No spaces inside the braced list.
+ * No line break before the closing brace.
+ * Indentation with the continuation indent.
+
+ Fundamentally, C++11 braced lists are formatted exactly like function
+ calls would be formatted in their place. If the braced list follows a
+ name (e.g. a type or variable name), clang-format formats as if the
+ ``{}`` were the parentheses of a function call with that name. If there
+ is no name, a zero-length name is assumed.
+
+ .. code-block:: c++
+
+ vector<int> x{1, 2, 3, 4};
+ vector<T> x{{}, {}, {}, {}};
+ f(MyMap[{composite, key}]);
+ new int[3]{1, 2, 3};
+ Type name{ // Comment
+ value};
+
+ * ``BLS_AlignFirstComment`` (in configuration: ``AlignFirstComment``)
+ Same as ``FunctionCall``, except for the handling of a comment at the
+ beginning; it then aligns everything following with the comment.
+
+ * No spaces inside the braced list. (Even for a comment at the first
+ position.)
+ * No line break before the closing brace.
+ * Indentation with the continuation indent, except when followed by a
+ line comment, in which case it uses the block indent.
+
+
+ .. code-block:: c++
+
+ vector<int> x{1, 2, 3, 4};
+ vector<T> x{{}, {}, {}, {}};
+ f(MyMap[{composite, key}]);
+ new int[3]{1, 2, 3};
+ Type name{// Comment
+ value};
- .. code-block:: c++
- true: false:
- vector<int> x{1, 2, 3, 4}; vs. vector<int> x{ 1, 2, 3, 4 };
- vector<T> x{{}, {}, {}, {}}; vector<T> x{ {}, {}, {}, {} };
- f(MyMap[{composite, key}]); f(MyMap[{ composite, key }]);
- new int[3]{1, 2, 3}; new int[3]{ 1, 2, 3 };
.. _DeriveLineEnding:
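As an illustrative aside (not part of the patch): with ``Cpp11BracedListStyle: FunctionCall`` in a ``.clang-format`` file, a braced list that follows a name wraps with the continuation indent exactly as the equivalent call expression would, per the zero-length-name rule documented above. A sketch with placeholder names:

.. code-block:: c++

   // FunctionCall style: the braced list breaks exactly like the call below.
   SomeWideType result{firstArgument, secondArgument,
                       thirdArgument};
   SomeWideType result2(firstArgument, secondArgument,
                        thirdArgument);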
@@ -6625,7 +6668,7 @@ the configuration (without a prefix: ``Auto``).
.. note::
This option doesn't apply to initializer braces if
- ``Cpp11BracedListStyle`` is set to ``true``.
+ ``Cpp11BracedListStyle`` is not ``Block``.
Possible values:
diff --git a/clang/docs/InternalsManual.rst b/clang/docs/InternalsManual.rst
index c677ddfa..eff46ab 100644
--- a/clang/docs/InternalsManual.rst
+++ b/clang/docs/InternalsManual.rst
@@ -10,7 +10,7 @@ Introduction
This document describes some of the more important APIs and internal design
decisions made in the Clang C front-end. The purpose of this document is to
-both capture some of this high level information and also describe some of the
+both capture some of this high-level information and also describe some of the
design decisions behind it. This is meant for people interested in hacking on
Clang, not for end-users. The description below is categorized by libraries,
and does not describe any of the clients of the libraries.
@@ -20,7 +20,7 @@ LLVM Support Library
The LLVM ``libSupport`` library provides many underlying libraries and
`data-structures <https://llvm.org/docs/ProgrammersManual.html>`_, including
-command line option processing, various containers and a system abstraction
+command line option processing, various containers, and a system abstraction
layer, which is used for file system access.
The Clang "Basic" Library
@@ -34,7 +34,7 @@ and information about the subset of the language being compiled for.
Part of this infrastructure is specific to C (such as the ``TargetInfo``
class), other parts could be reused for other non-C-based languages
(``SourceLocation``, ``SourceManager``, ``Diagnostics``, ``FileManager``).
-When and if there is future demand we can figure out if it makes sense to
+When and if there is future demand, we can figure out if it makes sense to
introduce a new library, move the general classes somewhere else, or introduce
some other solution.
@@ -96,7 +96,7 @@ The ``EXTENSION`` and ``EXTWARN`` severities are used for extensions to the
language that Clang accepts. This means that Clang fully understands and can
represent them in the AST, but we produce diagnostics to tell the user their
code is non-portable. The difference is that the former are ignored by
-default, and the later warn by default. The ``WARNING`` severity is used for
+default, and the latter warn by default. The ``WARNING`` severity is used for
constructs that are valid in the currently selected source language but that
are dubious in some way. The ``REMARK`` severity provides generic information
about the compilation that is not necessarily related to any dubious code. The
@@ -106,7 +106,7 @@ These *severities* are mapped into a smaller set (the ``Diagnostic::Level``
enum, {``Ignored``, ``Note``, ``Remark``, ``Warning``, ``Error``, ``Fatal``}) of
output
*levels* by the diagnostics subsystem based on various configuration options.
-Clang internally supports a fully fine grained mapping mechanism that allows
+Clang internally supports a fully fine-grained mapping mechanism that allows
you to map almost any diagnostic to the output level that you want. The only
diagnostics that cannot be mapped are ``NOTE``\ s, which always follow the
severity of the previously emitted diagnostic and ``ERROR``\ s, which can only
@@ -116,18 +116,18 @@ example).
Diagnostic mappings are used in many ways. For example, if the user specifies
``-pedantic``, ``EXTENSION`` maps to ``Warning``, if they specify
``-pedantic-errors``, it turns into ``Error``. This is used to implement
-options like ``-Wunused_macros``, ``-Wundef`` etc.
+options like ``-Wunused_macros``, ``-Wundef``, etc.
Mapping to ``Fatal`` should only be used for diagnostics that are considered so
severe that error recovery won't be able to recover sensibly from them (thus
-spewing a ton of bogus errors). One example of this class of error are failure
+spewing a ton of bogus errors). One example of this class of error is failure
to ``#include`` a file.
Diagnostic Wording
^^^^^^^^^^^^^^^^^^
The wording used for a diagnostic is critical because it is the only way for a
user to know how to correct their code. Use the following suggestions when
-wording a diagnostic.
+wording a diagnostic:
* Diagnostics in Clang do not start with a capital letter and do not end with
punctuation.
@@ -162,7 +162,7 @@ wording a diagnostic.
cannot be null in well-defined C++ code``.
* Prefer diagnostic wording without contractions whenever possible. The single
quote in a contraction can be visually distracting due to its use with
- syntactic constructs and contractions can be harder to understand for non-
+ syntactic constructs, and contractions can be harder to understand for non-
native English speakers.
The Format String
@@ -195,14 +195,14 @@ the C++ code that :ref:`produces them <internals-producing-diag>`, and are
referenced by ``%0`` .. ``%9``. If you have more than 10 arguments to your
diagnostic, you are doing something wrong :). Unlike ``printf``, there is no
requirement that arguments to the diagnostic end up in the output in the same
-order as they are specified, you could have a format string with "``%1 %0``"
+order as they are specified; you could have a format string with "``%1 %0``"
that swaps them, for example. The text in between the percent and digit are
formatting instructions. If there are no instructions, the argument is just
turned into a string and substituted in.
Here are some "best practices" for writing the English format string:
-* Keep the string short. It should ideally fit in the 80 column limit of the
+* Keep the string short. It should ideally fit in the 80-column limit of the
``DiagnosticKinds.td`` file. This avoids the diagnostic wrapping when
printed, and forces you to think about the important point you are conveying
with the diagnostic.
@@ -227,7 +227,7 @@ used to achieve this sort of thing in a localizable way, see below.
Formatting a Diagnostic Argument
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Arguments to diagnostics are fully typed internally, and come from a couple
+Arguments to diagnostics are fully typed internally and come from a couple of
different classes: integers, types, names, and random strings. Depending on
the class of the argument, it can be optionally formatted in different ways.
This gives the ``DiagnosticConsumer`` information about what the argument means
@@ -268,7 +268,7 @@ Description:
This format specifier is used to merge multiple related diagnostics together
into one common one, without requiring the difference to be specified as an
English string argument. Instead of specifying the string, the diagnostic
- gets an integer argument and the format string selects the numbered option.
+ gets an integer argument, and the format string selects the numbered option.
In this case, the "``%0``" value must be an integer in the range [0..2]. If
it is 0, it prints "unary", if it is 1 it prints "binary" if it is 2, it
prints "unary or binary". This allows other language translations to
@@ -287,7 +287,7 @@ Description:
additionally generates a namespace, enumeration, and enumerator list based on
the format string given. In the above case, a namespace is generated named
``FrobbleKind`` that has an unscoped enumeration with the enumerators
- ``VarDecl`` and ``FuncDecl`` which correspond to the values 0 and 1. This
+ ``VarDecl`` and ``FuncDecl``, which correspond to the values 0 and 1. This
permits a clearer use of the ``Diag`` in source code, as the above could be
called as: ``Diag(Loc, diag::frobble) << diag::FrobbleKind::VarDecl``.
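To make the substitution rules concrete, here is a small self-contained toy (an editor's sketch, not Clang's actual formatter; names and structure are invented) that implements plain ``%N`` and ``%select{...}N`` over string arguments:

.. code-block:: c++

   #include <iostream>
   #include <string>
   #include <vector>

   // Toy model of %N and %select{a|b|c}N substitution; integer arguments
   // are passed as decimal strings for simplicity.
   std::string format(const std::string &Fmt,
                      const std::vector<std::string> &Args) {
     std::string Out;
     for (size_t I = 0; I < Fmt.size(); ++I) {
       if (Fmt[I] != '%') { Out += Fmt[I]; continue; }
       if (Fmt.compare(I + 1, 7, "select{") == 0) {
         size_t Close = Fmt.find('}', I);      // end of the option list
         size_t N = std::stoul(Args[Fmt[Close + 1] - '0']);
         std::string Opts = Fmt.substr(I + 8, Close - I - 8);
         size_t Start = 0;
         for (size_t K = 0; K < N; ++K)        // skip N '|' separators
           Start = Opts.find('|', Start) + 1;
         size_t End = Opts.find('|', Start);
         Out += Opts.substr(Start,
                            End == std::string::npos ? End : End - Start);
         I = Close + 1;                        // skip past the digit
       } else {
         Out += Args[Fmt[I + 1] - '0'];        // plain %N
         ++I;
       }
     }
     return Out;
   }

   int main() {
     // Argument 0 is the integer selector (1 -> "binary"); %1 is the name.
     std::cout << format(
                      "'%1' is a %select{unary|binary|unary or binary}0 operator",
                      {"1", "+"})
               << "\n"; // prints: '+' is a binary operator
   }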
@@ -407,7 +407,7 @@ Example:
def note_ovl_candidate : Note<
"candidate %sub{select_ovl_candidate}3,2,1 not viable">;
- and will act as if it was written
+ and will act as if it were written
``"candidate %select{function|constructor}3%select{| template| %1}2 not viable"``.
Description:
This format specifier is used to avoid repeating strings verbatim in multiple
@@ -447,7 +447,7 @@ For example, the binary expression error comes from code like this:
<< lex->getType() << rex->getType()
<< lex->getSourceRange() << rex->getSourceRange();
-This shows that use of the ``Diag`` method: it takes a location (a
+This shows the use of the ``Diag`` method: it takes a location (a
:ref:`SourceLocation <SourceLocation>` object) and a diagnostic enum value
(which matches the name from ``Diagnostic*Kinds.td``). If the diagnostic takes
arguments, they are specified with the ``<<`` operator: the first argument
@@ -586,7 +586,7 @@ Strangely enough, the ``SourceLocation`` class represents a location within the
source code of the program. Important design points include:
#. ``sizeof(SourceLocation)`` must be extremely small, as these are embedded
- into many AST nodes and are passed around often. Currently it is 32 bits.
+ into many AST nodes and are passed around often. Currently, it is 32 bits.
#. ``SourceLocation`` must be a simple value object that can be efficiently
copied.
#. We should be able to represent a source location for any byte of any input
@@ -605,7 +605,7 @@ In practice, the ``SourceLocation`` works together with the ``SourceManager``
class to encode two pieces of information about a location: its spelling
location and its expansion location. For most tokens, these will be the
same. However, for a macro expansion (or tokens that came from a ``_Pragma``
-directive) these will describe the location of the characters corresponding to
+directive), these will describe the location of the characters corresponding to
the token and the location where the token was used (i.e., the macro
expansion point or the location of the ``_Pragma`` itself).
@@ -621,7 +621,7 @@ token. This concept maps directly to the "spelling location" for the token.
.. mostly taken from https://discourse.llvm.org/t/code-ranges-of-tokens-ast-elements/16893/2
Clang represents most source ranges by [first, last], where "first" and "last"
-each point to the beginning of their respective tokens. For example consider
+each point to the beginning of their respective tokens. For example, consider
the ``SourceRange`` of the following statement:
.. code-block:: text
@@ -632,7 +632,7 @@ the ``SourceRange`` of the following statement:
To map from this representation to a character-based representation, the "last"
location needs to be adjusted to point to (or past) the end of that token with
either ``Lexer::MeasureTokenLength()`` or ``Lexer::getLocForEndOfToken()``. For
-the rare cases where character-level source ranges information is needed we use
+the rare cases where character-level source ranges information is needed, we use
the ``CharSourceRange`` class.
The Driver Library
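As an illustrative aside, the adjustment described above might be packaged in a Clang-based tool like the following sketch (it assumes the surrounding tool provides the ``SourceManager`` and ``LangOptions``; only the public ``Lexer`` API named above is used):

.. code-block:: c++

   #include "clang/Basic/SourceManager.h"
   #include "clang/Lex/Lexer.h"

   // Widen a token-oriented [first, last] range into a half-open character
   // range by pushing the end location past the end of the last token.
   static clang::CharSourceRange
   toCharRange(clang::SourceRange R, const clang::SourceManager &SM,
               const clang::LangOptions &LangOpts) {
     clang::SourceLocation PastEnd = clang::Lexer::getLocForEndOfToken(
         R.getEnd(), /*Offset=*/0, SM, LangOpts);
     return clang::CharSourceRange::getCharRange(R.getBegin(), PastEnd);
   }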
@@ -651,17 +651,17 @@ The Frontend Library
====================
The Frontend library contains functionality useful for building tools on top of
-the Clang libraries, for example several methods for outputting diagnostics.
+the Clang libraries, including several methods for outputting diagnostics.
Compiler Invocation
-------------------
One of the classes provided by the Frontend library is ``CompilerInvocation``,
-which holds information that describe current invocation of the Clang ``-cc1``
+which holds information that describes the current invocation of the Clang ``-cc1``
frontend. The information typically comes from the command line constructed by
the Clang driver or from clients performing custom initialization. The data
structure is split into logical units used by different parts of the compiler,
-for example ``PreprocessorOptions``, ``LanguageOptions`` or ``CodeGenOptions``.
+for example, ``PreprocessorOptions``, ``LanguageOptions``, or ``CodeGenOptions``.
Command Line Interface
----------------------
@@ -698,7 +698,7 @@ Adding new Command Line Option
------------------------------
When adding a new command line option, the first place of interest is the header
-file declaring the corresponding options class (e.g. ``CodeGenOptions.h`` for
+file declaring the corresponding options class (e.g., ``CodeGenOptions.h`` for
command line option that affects the code generation). Create new member
variable for the option value:
@@ -739,7 +739,7 @@ The helper classes take a list of acceptable prefixes of the option (e.g.
Then, specify additional attributes via mix-ins:
* ``HelpText`` holds the text that will be printed besides the option name when
- the user requests help (e.g. via ``clang --help``).
+ the user requests help (e.g., via ``clang --help``).
* ``Group`` specifies the "category" of options this option belongs to. This is
used by various tools to categorize and sometimes filter options.
* ``Flags`` may contain "tags" associated with the option. These may affect how
@@ -779,7 +779,7 @@ use them to construct the ``-cc1`` job:
}
The last step is implementing the ``-cc1`` command line argument
-parsing/generation that initializes/serializes the option class (in our case
+parsing/generation that initializes/serializes the option class (in our case,
``CodeGenOptions``) stored within ``CompilerInvocation``. This can be done
automatically by using the marshalling annotations on the option definition:
@@ -946,13 +946,13 @@ described below. All of them take a key path argument and possibly other
information required for parsing or generating the command line argument.
**Note:** The marshalling infrastructure is not intended for driver-only
-options. Only options of the ``-cc1`` frontend need to be marshalled to/from
+options. Only options of the ``-cc1`` frontend need to be marshalled to/from a
``CompilerInvocation`` instance.
**Positive Flag**
The key path defaults to ``false`` and is set to ``true`` when the flag is
-present on command line.
+present on the command line.
.. code-block:: text
@@ -963,7 +963,7 @@ present on command line.
**Negative Flag**
The key path defaults to ``true`` and is set to ``false`` when the flag is
-present on command line.
+present on the command line.
.. code-block:: text
@@ -1041,7 +1041,7 @@ and the result is assigned to the key path on success.
The key path defaults to the value specified in ``MarshallingInfoEnum`` prefixed
by the contents of ``NormalizedValuesScope`` and ``::``. This ensures correct
-reference to an enum case is formed even if the enum resides in different
+reference to an enum case is formed even if the enum resides in a different
namespace or is an enum class. If the value present on the command line does not
match any of the comma-separated values from ``Values``, an error diagnostic is
issued. Otherwise, the corresponding element from ``NormalizedValues`` at the
@@ -1410,7 +1410,7 @@ or a clear engineering tradeoff -- should desugar minimally and wrap the result
in a construct representing the original source form.
For example, ``CXXForRangeStmt`` directly represents the syntactic form of a
-range-based for statement, but also holds a semantic representation of the
+range-based for statement but also holds a semantic representation of the
range declaration and iterator declarations. It does not contain a
fully-desugared ``ForStmt``, however.
@@ -1425,7 +1425,7 @@ with the same or similar semantics.
The ``Type`` class and its subclasses
-------------------------------------
-The ``Type`` class (and its subclasses) are an important part of the AST.
+The ``Type`` class (and its subclasses) is an important part of the AST.
Types are accessed through the ``ASTContext`` class, which implicitly creates
and uniques them as they are needed. Types have a couple of non-obvious
features: 1) they do not capture type qualifiers like ``const`` or ``volatile``
@@ -1474,7 +1474,7 @@ various operators (for example, the type of ``*Y`` is "``foo``", not
is an instance of the ``TypedefType`` class, which indicates that the type of
these expressions is a typedef for "``foo``".
-Representing types like this is great for diagnostics, because the
+Representing types like this is great for diagnostics because the
user-specified type is always immediately available. There are two problems
with this: first, various semantic checks need to make judgements about the
*actual structure* of a type, ignoring typedefs. Second, we need an efficient
@@ -1521,7 +1521,7 @@ know it exists. To continue the example, the result type of the indirection
operator is the pointee type of the subexpression. In order to determine the
type, we need to get the instance of ``PointerType`` that best captures the
typedef information in the program. If the type of the expression is literally
-a ``PointerType``, we can return that, otherwise we have to dig through the
+a ``PointerType``, we can return that; otherwise, we have to dig through the
typedefs to find the pointer type. For example, if the subexpression had type
"``foo*``", we could return that type as the result. If the subexpression had
type "``bar``", we want to return "``foo*``" (note that we do *not* want
@@ -1552,7 +1552,7 @@ that sets a bit), and remove one or more type qualifiers (just return a
``QualType`` with the bitfield set to empty).
Further, because the bits are stored outside of the type itself, we do not need
-to create duplicates of types with different sets of qualifiers (i.e. there is
+to create duplicates of types with different sets of qualifiers (i.e., there is
only a single heap allocated "``int``" type: "``const int``" and "``volatile
const int``" both point to the same heap allocated "``int``" type). This
reduces the heap size used to represent bits and also means we do not have to
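As a toy illustration of this design (an editor's sketch; Clang's real ``QualType`` is more involved), the low bits of an aligned pointer can carry the ``const``/``volatile`` flags, so qualified variants of a type cost no extra heap nodes:

.. code-block:: c++

   #include <cassert>
   #include <cstdint>

   struct Type { const char *Name; }; // stand-in for a uniqued type node

   // A pointer-sized value object: pointer to the unqualified type plus
   // qualifier bits stored in the low bits of the pointer itself.
   class ToyQualType {
     uintptr_t Value; // bit 0 = const, bit 1 = volatile
   public:
     explicit ToyQualType(const Type *T, unsigned Quals = 0)
         : Value(reinterpret_cast<uintptr_t>(T) | Quals) {
       assert((reinterpret_cast<uintptr_t>(T) & 0x3) == 0 && "needs alignment");
     }
     const Type *getTypePtr() const {
       return reinterpret_cast<const Type *>(Value & ~uintptr_t(0x3));
     }
     bool isConst() const { return Value & 0x1; }
     ToyQualType withConst() const {
       ToyQualType Q = *this;
       Q.Value |= 0x1;
       return Q;
     }
   };

   int main() {
     static alignas(4) Type IntTy{"int"};
     ToyQualType T(&IntTy);          // "int"
     ToyQualType CT = T.withConst(); // "const int": same node, one extra bit
     assert(CT.getTypePtr() == T.getTypePtr() && CT.isConst() && !T.isConst());
   }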
@@ -1972,7 +1972,7 @@ and optimize code for it, but it's used as parsing continues to detect further
errors in the input. Clang-based tools also depend on such ASTs, and IDEs in
particular benefit from a high-quality AST for broken code.
-In presence of errors, clang uses a few error-recovery strategies to present the
+In the presence of errors, clang uses a few error-recovery strategies to present the
broken code in the AST:
- correcting errors: in cases where clang is confident about the fix, it
@@ -1981,7 +1981,7 @@ broken code in the AST:
provide more accurate subsequent diagnostics. Typo correction is a typical
example.
- representing invalid node: the invalid node is preserved in the AST in some
- form, e.g. when the "declaration" part of the declaration contains semantic
+ form, e.g., when the "declaration" part of the declaration contains semantic
errors, the Decl node is marked as invalid.
- dropping invalid node: this often happens for errors that we don’t have
graceful recovery. Prior to Recovery AST, a mismatched-argument function call
@@ -1994,9 +1994,9 @@ for broken code.
Recovery AST
^^^^^^^^^^^^
-The idea of Recovery AST is to use recovery nodes which act as a placeholder to
+The idea of Recovery AST is to use recovery nodes, which act as a placeholder to
maintain the rough structure of the parsing tree, preserve locations and
-children but have no language semantics attached to them.
+children, but have no language semantics attached to them.
For example, consider the following mismatched function call:
@@ -2031,10 +2031,10 @@ With Recovery AST, the AST looks like:
`-DeclRefExpr <col:9> 'int' lvalue ParmVar 'abc' 'int'
-An alternative is to use existing Exprs, e.g. CallExpr for the above example.
-This would capture more call details (e.g. locations of parentheses) and allow
+An alternative is to use existing Exprs, e.g., CallExpr for the above example.
+This would capture more call details (e.g., locations of parentheses) and allow
it to be treated uniformly with valid CallExprs. However, jamming the data we
-have into CallExpr forces us to weaken its invariants, e.g. arg count may be
+have into CallExpr forces us to weaken its invariants, e.g., arg count may be
wrong. This would introduce a huge burden on consumers of the AST to handle such
"impossible" cases. So when we're representing (rather than correcting) errors,
we use a distinct recovery node type with extremely weak invariants instead.
@@ -2048,7 +2048,7 @@ Types and dependence
^^^^^^^^^^^^^^^^^^^^
``RecoveryExpr`` is an ``Expr``, so it must have a type. In many cases the true
-type can't really be known until the code is corrected (e.g. a call to a
+type can't really be known until the code is corrected (e.g., a call to a
function that doesn't exist). And it means that we can't properly perform type
checks on some containing constructs, such as ``return 42 + unknownFunction()``.
@@ -2058,7 +2058,7 @@ mean dependence on a template parameter or how an error is repaired. The
``DependentTy``, and this suppresses type-based analysis in the same way it
would inside a template.
-In cases where we are confident about the concrete type (e.g. the return type
+In cases where we are confident about the concrete type (e.g., the return type
for a broken non-overloaded function call), the ``RecoveryExpr`` will have this
type. This allows more code to be typechecked, and produces a better AST and
more diagnostics. For example:
@@ -2071,7 +2071,7 @@ more diagnostics. For example:
Whether or not the ``RecoveryExpr`` has a dependent type, it is always
considered value-dependent, because its value isn't well-defined until the error
is resolved. Among other things, this means that clang doesn't emit more errors
-where a RecoveryExpr is used as a constant (e.g. array size), but also won't try
+where a RecoveryExpr is used as a constant (e.g., array size), but also won't try
to evaluate it.
ContainsErrors bit
@@ -2122,7 +2122,7 @@ cycles. One example of a cycle is the connection between a
``ClassTemplateDecl`` and its "templated" ``CXXRecordDecl``. The *templated*
``CXXRecordDecl`` represents all the fields and methods inside the class
template, while the ``ClassTemplateDecl`` holds the information which is
-related to being a template, i.e. template arguments, etc. We can get the
+related to being a template, i.e., template arguments, etc. We can get the
*templated* class (the ``CXXRecordDecl``) of a ``ClassTemplateDecl`` with
``ClassTemplateDecl::getTemplatedDecl()``. And we can get back a pointer of the
"described" class template from the *templated* class:
@@ -2145,7 +2145,7 @@ we skip the copy.
The informal definition of structural equivalency is the following:
Two nodes are **structurally equivalent** if they are
-- builtin types and refer to the same type, e.g. ``int`` and ``int`` are
+- builtin types and refer to the same type, e.g., ``int`` and ``int`` are
structurally equivalent,
- function types and all their parameters have structurally equivalent types,
- record types and all their fields in order of their definition have the same
@@ -2162,7 +2162,7 @@ mentioned properties, we have to check for equivalent template
parameters/arguments, etc.
The structural equivalent check can be and is used independently from the
-ASTImporter, e.g. the ``clang::Sema`` class uses it also.
+ASTImporter, e.g., the ``clang::Sema`` class uses it also.
The equivalence of nodes may depend on the equivalency of other pairs of nodes.
Thus, the check is implemented as a parallel graph traversal. We traverse
@@ -2195,7 +2195,7 @@ Redeclaration Chains
^^^^^^^^^^^^^^^^^^^^
The early version of the ``ASTImporter``'s merge mechanism squashed the
-declarations, i.e. it aimed to have only one declaration instead of maintaining
+declarations, i.e., it aimed to have only one declaration instead of maintaining
a whole redeclaration chain. This early approach simply skipped importing a
function prototype, but it imported a definition. To demonstrate the problem
with this approach let's consider an empty "to" context and the following
@@ -2225,7 +2225,7 @@ another definition, we will use the existing definition. However, we can import
prototype(s): we chain the newly imported prototype(s) to the existing
definition. Whenever we import a new prototype from a third context, that will
be added to the end of the redeclaration chain. This may result in long
-redeclaration chains in certain cases, e.g. if we import from several
+redeclaration chains in certain cases, e.g., if we import from several
translation units which include the same header with the prototype.
.. Squashing prototypes
@@ -2290,7 +2290,7 @@ Traversal during the Import
^^^^^^^^^^^^^^^^^^^^^^^^^^^
The node specific import mechanisms are implemented in
-``ASTNodeImporter::VisitNode()`` functions, e.g. ``VisitFunctionDecl()``.
+``ASTNodeImporter::VisitNode()`` functions, e.g., ``VisitFunctionDecl()``.
When we import a declaration then first we import everything which is needed to
call the constructor of that declaration node. Everything which can be set
later is set after the node is created. For example, in case of a
@@ -2490,7 +2490,7 @@ In case of LLDB, an implementation of the ``ExternalASTSource`` interface is
attached to the AST context which is related to the parsed expression. This
implementation of the ``ExternalASTSource`` interface is realized with the help
of the ``ASTImporter`` class. This way, LLDB can reuse Clang's parsing
-machinery while synthesizing the underlying AST from the debug data (e.g. from
+machinery while synthesizing the underlying AST from the debug data (e.g., from
DWARF). From the view of the ``ASTImporter`` this means both the "to" and the
"from" context may have declaration contexts with external lexical storage. If
a ``DeclContext`` in the "to" AST context has external lexical storage then we
@@ -2573,7 +2573,7 @@ conflict error (ODR violation in C++). In this case, we return with an
clients of the ``ASTImporter`` may require a different, perhaps less
conservative and more liberal error handling strategy.
-E.g. static analysis clients may benefit if the node is created even if there
+E.g., static analysis clients may benefit if the node is created even if there
is a name conflict. During the CTU analysis of certain projects, we recognized
that there are global declarations which collide with declarations from other
translation units, but they are not referenced outside from their translation
@@ -2916,7 +2916,7 @@ Any error during satisfaction is recorded in ``ConstraintSatisfaction``.
for nested requirements, ``ConstraintSatisfaction`` is stored (including
diagnostics) in the AST, which is something we might want to improve.
-When an atomic constraint is not satified, we try to substitute into any
+When an atomic constraint is not satisfied, we try to substitute into any
enclosing concept-id using the same mechanism described above, for
diagnostics purpose, and inject that in the ``ConstraintSatisfaction``.
@@ -3584,7 +3584,7 @@ be specified by appending a ``+`` to the number. For example:
void f(); // expected-note 0+ {{previous declaration is here}}
void g(); // expected-note 1+ {{previous declaration is here}}
-In the first example, the diagnostic becomes optional, i.e. it will be
+In the first example, the diagnostic becomes optional, i.e., it will be
swallowed if it occurs, but will not generate an error if it does not occur. In
the second example, the diagnostic must occur at least once. As a short-hand,
"one or more" can be specified simply by ``+``. For example:
diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst
index 8b42468..fe77f91 100644
--- a/clang/docs/ReleaseNotes.rst
+++ b/clang/docs/ReleaseNotes.rst
@@ -128,6 +128,17 @@ AST Dumping Potentially Breaking Changes
- Default arguments of template template parameters are pretty-printed now.
+- Pretty-printed ``asm`` attributes are now always the first attribute
+ on the right side of the declaration. Before we had, e.g.:
+
+ ``__attribute__(("visibility")) asm("string")``
+
+ Now we have:
+
+ ``asm("string") __attribute__(("visibility"))``
+
+ This form is accepted by both the clang and gcc parsers.
+
Clang Frontend Potentially Breaking Changes
-------------------------------------------
- Members of anonymous unions/structs are now injected as ``IndirectFieldDecl``
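An illustrative declaration exercising the ``asm`` change above (names are placeholders; the declaration compiles with both clang and gcc):

.. code-block:: c++

   // The asm label renames the symbol; the attribute sets its visibility.
   // The pretty-printer now emits the asm label before the attribute.
   extern int counter asm("renamed_counter")
       __attribute__((visibility("default")));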
@@ -478,6 +489,7 @@ Bug Fixes to C++ Support
- Fix a crash when attempting to deduce a deduction guide from a non deducible template template parameter. (#130604)
- Fix for clang incorrectly rejecting the default construction of a union with
nontrivial member when another member has an initializer. (#GH81774)
+- Fixed a template depth issue when parsing lambdas inside a type constraint. (#GH162092)
- Diagnose unresolved overload sets in non-dependent compound requirements. (#GH51246) (#GH97753)
Bug Fixes to AST Handling
diff --git a/clang/include/clang/Basic/BuiltinsX86.td b/clang/include/clang/Basic/BuiltinsX86.td
index 62c70fba..d03c778 100644
--- a/clang/include/clang/Basic/BuiltinsX86.td
+++ b/clang/include/clang/Basic/BuiltinsX86.td
@@ -124,13 +124,13 @@ let Attributes = [Const, NoThrow, RequiredVectorWidth<128>] in {
}
let Features = "ssse3" in {
- def pmulhrsw128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Vector<8, short>)">;
def psignb128 : X86Builtin<"_Vector<16, char>(_Vector<16, char>, _Vector<16, char>)">;
def psignw128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Vector<8, short>)">;
def psignd128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>)">;
}
let Features = "ssse3", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
+ def pmulhrsw128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Vector<8, short>)">;
def pmaddubsw128 : X86Builtin<"_Vector<8, short>(_Vector<16, char>, _Vector<16, char>)">;
def pshufb128 : X86Builtin<"_Vector<16, char>(_Vector<16, char>, _Vector<16, char>)">;
}
@@ -608,7 +608,6 @@ let Features = "avx2", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] i
def palignr256 : X86Builtin<"_Vector<32, char>(_Vector<32, char>, _Vector<32, char>, _Constant int)">;
def pmovmskb256 : X86Builtin<"int(_Vector<32, char>)">;
- def pmulhrsw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<16, short>)">;
def psadbw256 : X86Builtin<"_Vector<4, long long int>(_Vector<32, char>, _Vector<32, char>)">;
def psignb256 : X86Builtin<"_Vector<32, char>(_Vector<32, char>, _Vector<32, char>)">;
def psignw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<16, short>)">;
@@ -661,6 +660,7 @@ let Features = "avx2", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWi
def psrawi256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, int)">;
def psradi256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, int)">;
+ def pmulhrsw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<16, short>)">;
def pmulhuw256 : X86Builtin<"_Vector<16, unsigned short>(_Vector<16, unsigned short>, _Vector<16, unsigned short>)">;
def pmulhw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<16, short>)">;
@@ -1386,13 +1386,10 @@ let Features = "avx512bitalg", Attributes = [NoThrow, Const, RequiredVectorWidth
def vpshufbitqmb512_mask : X86Builtin<"unsigned long long int(_Vector<64, char>, _Vector<64, char>, unsigned long long int)">;
}
-let Features = "avx512bw", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
- def pmulhrsw512 : X86Builtin<"_Vector<32, short>(_Vector<32, short>, _Vector<32, short>)">;
-}
-
let Features = "avx512bw", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
def pavgb512 : X86Builtin<"_Vector<64, unsigned char>(_Vector<64, unsigned char>, _Vector<64, unsigned char>)">;
def pavgw512 : X86Builtin<"_Vector<32, unsigned short>(_Vector<32, unsigned short>, _Vector<32, unsigned short>)">;
+ def pmulhrsw512 : X86Builtin<"_Vector<32, short>(_Vector<32, short>, _Vector<32, short>)">;
def pmulhuw512 : X86Builtin<"_Vector<32, unsigned short>(_Vector<32, unsigned short>, _Vector<32, unsigned short>)">;
def pmulhw512 : X86Builtin<"_Vector<32, short>(_Vector<32, short>, _Vector<32, short>)">;
}
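
Moving ``pmulhrsw`` into the ``Constexpr`` blocks makes it usable in constant expressions. A minimal sketch, assuming the ``tmmintrin.h``/``avx2intrin.h`` wrappers inherit the constexpr attribute:

    #include <tmmintrin.h>

    constexpr __m128i A = {0x4000400040004000LL, 0x4000400040004000LL};
    constexpr __m128i R = _mm_mulhrs_epi16(A, A); // now folds at compile time
    // Each 16-bit lane: ((0x4000 * 0x4000 >> 14) + 1) >> 1 == 0x2000.
    static_assert(R[0] == 0x2000200020002000LL);
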
diff --git a/clang/include/clang/Basic/DirectoryEntry.h b/clang/include/clang/Basic/DirectoryEntry.h
index 35fe529..6965612 100644
--- a/clang/include/clang/Basic/DirectoryEntry.h
+++ b/clang/include/clang/Basic/DirectoryEntry.h
@@ -119,10 +119,10 @@ namespace FileMgr {
/// the private optional_none_tag to keep it to the size of a single pointer.
template <class RefTy> class MapEntryOptionalStorage {
using optional_none_tag = typename RefTy::optional_none_tag;
- RefTy MaybeRef;
+ RefTy MaybeRef = optional_none_tag();
public:
- MapEntryOptionalStorage() : MaybeRef(optional_none_tag()) {}
+ MapEntryOptionalStorage() = default;
template <class... ArgTypes>
explicit MapEntryOptionalStorage(std::in_place_t, ArgTypes &&...Args)
@@ -168,11 +168,7 @@ class OptionalStorage<clang::DirectoryEntryRef>
clang::FileMgr::MapEntryOptionalStorage<clang::DirectoryEntryRef>;
public:
- OptionalStorage() = default;
-
- template <class... ArgTypes>
- explicit OptionalStorage(std::in_place_t, ArgTypes &&...Args)
- : StorageImpl(std::in_place_t{}, std::forward<ArgTypes>(Args)...) {}
+ using StorageImpl::StorageImpl;
OptionalStorage &operator=(clang::DirectoryEntryRef Ref) {
StorageImpl::operator=(Ref);
diff --git a/clang/include/clang/Basic/FileEntry.h b/clang/include/clang/Basic/FileEntry.h
index c973ba3..e7091fd 100644
--- a/clang/include/clang/Basic/FileEntry.h
+++ b/clang/include/clang/Basic/FileEntry.h
@@ -218,11 +218,7 @@ class OptionalStorage<clang::FileEntryRef>
clang::FileMgr::MapEntryOptionalStorage<clang::FileEntryRef>;
public:
- OptionalStorage() = default;
-
- template <class... ArgTypes>
- explicit OptionalStorage(std::in_place_t, ArgTypes &&...Args)
- : StorageImpl(std::in_place_t{}, std::forward<ArgTypes>(Args)...) {}
+ using StorageImpl::StorageImpl;
OptionalStorage &operator=(clang::FileEntryRef Ref) {
StorageImpl::operator=(Ref);
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 07a8724..96d8300 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -1013,9 +1013,9 @@ let ManualCodegen = [{
}] in {
let HasFRMRoundModeOp = true in {
// 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
- defm vfadd : RVVFloatingBinBuiltinSetRoundingMode;
- defm vfsub : RVVFloatingBinBuiltinSetRoundingMode;
- defm vfrsub : RVVFloatingBinVFBuiltinSetRoundingMode;
+ defm vfadd : RVVFloatingBinBuiltinSetRoundingMode<HasBF=1>;
+ defm vfsub : RVVFloatingBinBuiltinSetRoundingMode<HasBF=1>;
+ defm vfrsub : RVVFloatingBinVFBuiltinSetRoundingMode<HasBF=1>;
// 13.3. Vector Widening Floating-Point Add/Subtract Instructions
// Widening FP add/subtract, 2*SEW = 2*SEW +/- SEW
@@ -1023,14 +1023,14 @@ let ManualCodegen = [{
defm vfwsub : RVVFloatingWidenOp0BinBuiltinSetRoundingMode;
// 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
- defm vfmul : RVVFloatingBinBuiltinSetRoundingMode;
+ defm vfmul : RVVFloatingBinBuiltinSetRoundingMode<HasBF=1>;
defm vfdiv : RVVFloatingBinBuiltinSetRoundingMode;
defm vfrdiv : RVVFloatingBinVFBuiltinSetRoundingMode;
}
// 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
- defm vfadd : RVVFloatingBinBuiltinSet;
- defm vfsub : RVVFloatingBinBuiltinSet;
- defm vfrsub : RVVFloatingBinVFBuiltinSet;
+ defm vfadd : RVVFloatingBinBuiltinSet<HasBF=1>;
+ defm vfsub : RVVFloatingBinBuiltinSet<HasBF=1>;
+ defm vfrsub : RVVFloatingBinVFBuiltinSet<HasBF=1>;
// 13.3. Vector Widening Floating-Point Add/Subtract Instructions
// Widening FP add/subtract, 2*SEW = 2*SEW +/- SEW
@@ -1038,7 +1038,7 @@ let ManualCodegen = [{
defm vfwsub : RVVFloatingWidenOp0BinBuiltinSet;
// 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
- defm vfmul : RVVFloatingBinBuiltinSet;
+ defm vfmul : RVVFloatingBinBuiltinSet<HasBF=1>;
defm vfdiv : RVVFloatingBinBuiltinSet;
defm vfrdiv : RVVFloatingBinVFBuiltinSet;
}
@@ -1065,6 +1065,10 @@ let ManualCodegen = [{
defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "x",
[["vv", "w", "wvvu"],
["vf", "w", "wveu"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "y",
+ [["vv", "vw", "wvvu"],
+ ["vf", "vw", "wveu"]]>;
}
}
// 13.3. Vector Widening Floating-Point Add/Subtract Instructions
@@ -1081,6 +1085,10 @@ let ManualCodegen = [{
defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "x",
[["vv", "w", "wvv"],
["vf", "w", "wve"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "y",
+ [["vv", "vw", "wvv"],
+ ["vf", "vw", "wve"]]>;
}
}
}
@@ -1170,6 +1178,8 @@ let ManualCodegen = [{
defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "fd", [["v", "v", "vvu"]]>;
let RequiredFeatures = ["zvfh"] in
defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "x", [["v", "v", "vvu"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "y", [["v", "v", "vvu"]]>;
}
// 13.8. Vector Floating-Point Square-Root Instruction
defm vfsqrt : RVVOutBuiltinSet<"vfsqrt", "fd", [["v", "v", "vv"]]>;
@@ -1180,21 +1190,26 @@ let ManualCodegen = [{
defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "fd", [["v", "v", "vv"]]>;
let RequiredFeatures = ["zvfh"] in
defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "x", [["v", "v", "vv"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "y", [["v", "v", "vv"]]>;
}
// 13.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
defm vfrsqrt7 : RVVOutBuiltinSet<"vfrsqrt7", "fd", [["v", "v", "vv"]]>;
let RequiredFeatures = ["zvfh"] in
defm vfrsqrt7 : RVVOutBuiltinSet<"vfrsqrt7", "x", [["v", "v", "vv"]]>;
+let RequiredFeatures = ["zvfbfa"] in
+ defm vfrsqrt7 : RVVOutBuiltinSet<"vfrsqrt7", "y", [["v", "v", "vv"]]>;
+
// 13.11. Vector Floating-Point MIN/MAX Instructions
-defm vfmin : RVVFloatingBinBuiltinSet;
-defm vfmax : RVVFloatingBinBuiltinSet;
+defm vfmin : RVVFloatingBinBuiltinSet<HasBF=1>;
+defm vfmax : RVVFloatingBinBuiltinSet<HasBF=1>;
// 13.12. Vector Floating-Point Sign-Injection Instructions
-defm vfsgnj : RVVFloatingBinBuiltinSet;
-defm vfsgnjn : RVVFloatingBinBuiltinSet;
-defm vfsgnjx : RVVFloatingBinBuiltinSet;
+defm vfsgnj : RVVFloatingBinBuiltinSet<HasBF=1>;
+defm vfsgnjn : RVVFloatingBinBuiltinSet<HasBF=1>;
+defm vfsgnjx : RVVFloatingBinBuiltinSet<HasBF=1>;
}
defm vfneg_v : RVVPseudoVFUnaryBuiltin<"vfsgnjn", "fd">;
let RequiredFeatures = ["zvfh"] in
@@ -1219,6 +1234,8 @@ let UnMaskedPolicyScheme = HasPassthruOperand in {
defm vfclass : RVVOp0BuiltinSet<"vfclass", "fd", [["v", "Uv", "Uvv"]]>;
let RequiredFeatures = ["zvfh"] in
defm vfclass : RVVOp0BuiltinSet<"vfclass", "x", [["v", "Uv", "Uvv"]]>;
+let RequiredFeatures = ["zvfbfa"] in
+ defm vfclass : RVVOp0BuiltinSet<"vfclass", "y", [["v", "vUv", "Uvv"]]>;
}
// 13.15. Vector Floating-Point Merge Instruction
@@ -1239,6 +1256,9 @@ let HasMasked = false,
let RequiredFeatures = ["zvfh"] in
defm vfmerge : RVVOutOp1BuiltinSet<"vfmerge", "x",
[["vfm", "v", "vvem"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm vfmerge : RVVOutOp1BuiltinSet<"vfmerge", "y",
+ [["vfm", "v", "vvem"]]>;
}
// 13.16. Vector Floating-Point Move Instruction
@@ -1252,6 +1272,9 @@ let HasMasked = false,
let RequiredFeatures = ["zvfh"] in
defm vfmv_v : RVVOutBuiltinSet<"vfmv_v_f", "x",
[["f", "v", "ve"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm vfmv_v : RVVOutBuiltinSet<"vfmv_v_f", "y",
+ [["f", "v", "ve"]]>;
}
// 13.17. Single-Width Floating-Point/Integer Type-Convert Instructions
@@ -1287,10 +1310,16 @@ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
defm : RVVConvBuiltinSet<"vfwcvt_f_x_v", "c", [["Fw", "Fwv"]]>;
}
}
+ let RequiredFeatures = ["zvfbfa"], OverloadedName = "vfwcvt_f_bf16" in {
+ defm : RVVConvBuiltinSet<"vfwcvt_f_xu_v", "c", [["Yw", "YwUv"]]>;
+ defm : RVVConvBuiltinSet<"vfwcvt_f_x_v", "c", [["Yw", "Ywv"]]>;
+ }
let OverloadedName = "vfwcvt_f" in {
defm : RVVConvBuiltinSet<"vfwcvt_f_f_v", "f", [["w", "wv"]]>;
let RequiredFeatures = ["zvfhmin"] in
defm : RVVConvBuiltinSet<"vfwcvt_f_f_v", "x", [["w", "wv"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm : RVVConvBuiltinSet<"vfwcvt_f_f_v", "y", [["vw", "wv"]]>;
}
}
@@ -1300,17 +1329,23 @@ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
defm : RVVConvBuiltinSet<"vfncvt_rtz_xu_f_w", "si", [["Uv", "UvFw"]]>;
let RequiredFeatures = ["zvfh"] in
defm : RVVConvBuiltinSet<"vfncvt_rtz_xu_f_w", "c", [["Uv", "UvFw"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm : RVVConvBuiltinSet<"vfncvt_rtz_xu_f_w", "c", [["YwUv", "UvYw"]]>;
}
let OverloadedName = "vfncvt_rtz_x" in {
defm : RVVConvBuiltinSet<"vfncvt_rtz_x_f_w", "si", [["Iv", "IvFw"]]>;
let RequiredFeatures = ["zvfh"] in
defm : RVVConvBuiltinSet<"vfncvt_rtz_x_f_w", "c", [["Iv", "IvFw"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm : RVVConvBuiltinSet<"vfncvt_rtz_x_f_w", "c", [["YwIv", "IvYw"]]>;
}
let OverloadedName = "vfncvt_rod_f" in {
defm : RVVConvBuiltinSet<"vfncvt_rod_f_f_w", "f", [["v", "vw"]]>;
let RequiredFeatures = ["zvfh"] in
defm : RVVConvBuiltinSet<"vfncvt_rod_f_f_w", "x", [["v", "vw"]]>;
}
+ let RequiredFeatures = ["zvfbfa"], OverloadedName = "vfncvt_rod_f_bf16" in
+ defm : RVVConvBuiltinSet<"vfncvt_rod_f_f_w", "y", [["v", "vw"]]>;
}
// Zvfbfmin - Vector convert BF16 to FP32
@@ -1363,11 +1398,15 @@ let ManualCodegen = [{
defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "si", [["Iv", "IvFwu"]]>;
let RequiredFeatures = ["zvfh"] in
defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "c", [["Iv", "IvFwu"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "c", [["YwIv", "IvYwu"]]>;
}
let OverloadedName = "vfncvt_xu" in {
defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "si", [["Uv", "UvFwu"]]>;
let RequiredFeatures = ["zvfh"] in
defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "c", [["Uv", "UvFwu"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "c", [["YwUv", "UvYwu"]]>;
}
let OverloadedName = "vfncvt_f" in {
defm : RVVConvBuiltinSet<"vfncvt_f_x_w", "f", [["v", "vIwu"]]>;
@@ -1382,6 +1421,8 @@ let ManualCodegen = [{
let RequiredFeatures = ["zvfhmin"] in
defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "x", [["v", "vwu"]]>;
}
+ let RequiredFeatures = ["zvfbfa"], OverloadedName = "vfncvt_f_bf16" in
+ defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "y", [["v", "vwu"]]>;
}
// Zvfbfmin - Vector convert FP32 to BF16
@@ -1430,11 +1471,15 @@ let ManualCodegen = [{
defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "si", [["Iv", "IvFw"]]>;
let RequiredFeatures = ["zvfh"] in
defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "c", [["Iv", "IvFw"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "c", [["YwIv", "IvYw"]]>;
}
let OverloadedName = "vfncvt_xu" in {
defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "si", [["Uv", "UvFw"]]>;
let RequiredFeatures = ["zvfh"] in
defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "c", [["Uv", "UvFw"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "c", [["YwUv", "UvYw"]]>;
}
let OverloadedName = "vfncvt_f" in {
defm : RVVConvBuiltinSet<"vfncvt_f_x_w", "f", [["v", "vIw"]]>;
@@ -1449,6 +1494,8 @@ let ManualCodegen = [{
let RequiredFeatures = ["zvfhmin"] in
defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "x", [["v", "vw"]]>;
}
+ let RequiredFeatures = ["zvfbfa"], OverloadedName = "vfncvt_f_bf16" in
+ defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "y", [["v", "vw"]]>;
}
// Zvfbfmin - Vector convert FP32 to BF16
@@ -1578,6 +1625,9 @@ let HasMasked = false, MaskedPolicyScheme = NonePolicy in {
let RequiredFeatures = ["zvfh"] in
defm vfmv_f : RVVOp0BuiltinSet<"vfmv_f_s", "x",
[["s", "ve", "ev"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm vfmv_f : RVVOp0BuiltinSet<"vfmv_f_s", "y",
+ [["s", "ve", "ev"]]>;
}
let OverloadedName = "vfmv_s",
UnMaskedPolicyScheme = HasPassthruOperand,
@@ -1589,6 +1639,9 @@ let HasMasked = false, MaskedPolicyScheme = NonePolicy in {
defm vfmv_s : RVVOutBuiltinSet<"vfmv_s_f", "x",
[["f", "v", "ve"],
["x", "Uv", "UvUe"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm vfmv_s : RVVOutBuiltinSet<"vfmv_s_f", "y",
+ [["f", "v", "ve"]]>;
}
}
@@ -1601,11 +1654,11 @@ defm vslidedown : RVVSlideDownBuiltinSet;
// 16.3.3. Vector Slide1up Instructions
let UnMaskedPolicyScheme = HasPassthruOperand in {
defm vslide1up : RVVSlideOneBuiltinSet;
-defm vfslide1up : RVVFloatingBinVFBuiltinSet;
+defm vfslide1up : RVVFloatingBinVFBuiltinSet<HasBF=1>;
// 16.3.4. Vector Slide1down Instruction
defm vslide1down : RVVSlideOneBuiltinSet;
-defm vfslide1down : RVVFloatingBinVFBuiltinSet;
+defm vfslide1down : RVVFloatingBinVFBuiltinSet<HasBF=1>;
// 16.4. Vector Register Gather Instructions
// signed and floating type
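
The new ``y`` (bf16) entries surface as ``__riscv_...bf16...`` intrinsics when ``zvfbfa`` is enabled. A minimal usage sketch, with names assumed to follow the usual RVV pattern (cf. the autogenerated zvfbfa tests):

    #include <riscv_vector.h>

    vbfloat16m1_t add_bf16(vbfloat16m1_t a, vbfloat16m1_t b, size_t vl) {
      return __riscv_vfadd_vv_bf16m1(a, b, vl); // requires zvfbfa
    }
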
diff --git a/clang/include/clang/Basic/riscv_vector_common.td b/clang/include/clang/Basic/riscv_vector_common.td
index 767bcee..eaa2ba4 100644
--- a/clang/include/clang/Basic/riscv_vector_common.td
+++ b/clang/include/clang/Basic/riscv_vector_common.td
@@ -83,6 +83,8 @@
// elements of the same width
// F: given a vector type, compute the vector type with floating-point type
// elements of the same width
+// Y: given a vector type, compute the vector type with bfloat16 type elements
+// of the same width
// S: given a vector type, computes its equivalent one for LMUL=1. This is a
// no-op if the vector was already LMUL=1
// (Log2EEW:Value): Log2EEW value could be 3/4/5/6 (8/16/32/64), given a
@@ -470,6 +472,10 @@ let HasMaskedOffOperand = false in {
defm "" : RVVOutOp1BuiltinSet<NAME, "x",
[["vv", "v", "vvvv"],
["vf", "v", "vvev"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVOutOp1BuiltinSet<NAME, "y",
+ [["vv", "v", "vvvv"],
+ ["vf", "v", "vvev"]]>;
}
multiclass RVVFloatingTerBuiltinSetRoundingMode {
defm "" : RVVOutOp1BuiltinSet<NAME, "fd",
@@ -479,6 +485,10 @@ let HasMaskedOffOperand = false in {
defm "" : RVVOutOp1BuiltinSet<NAME, "x",
[["vv", "v", "vvvvu"],
["vf", "v", "vvevu"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVOutOp1BuiltinSet<NAME, "y",
+ [["vv", "v", "vvvvu"],
+ ["vf", "v", "vvevu"]]>;
}
}
@@ -491,6 +501,10 @@ let HasMaskedOffOperand = false, Log2LMUL = [-2, -1, 0, 1, 2] in {
defm "" : RVVOutOp1Op2BuiltinSet<NAME, "x",
[["vv", "w", "wwvv"],
["vf", "w", "wwev"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVOutOp1Op2BuiltinSet<NAME, "y",
+ [["vv", "vw", "wwvv"],
+ ["vf", "vw", "wwev"]]>;
}
multiclass RVVFloatingWidenTerBuiltinSetRoundingMode {
defm "" : RVVOutOp1Op2BuiltinSet<NAME, "f",
@@ -500,10 +514,14 @@ let HasMaskedOffOperand = false, Log2LMUL = [-2, -1, 0, 1, 2] in {
defm "" : RVVOutOp1Op2BuiltinSet<NAME, "x",
[["vv", "w", "wwvvu"],
["vf", "w", "wwevu"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVOutOp1Op2BuiltinSet<NAME, "y",
+ [["vv", "vw", "wwvvu"],
+ ["vf", "vw", "wwevu"]]>;
}
}
-multiclass RVVFloatingBinBuiltinSet {
+multiclass RVVFloatingBinBuiltinSet<bit HasBF = 0> {
defm "" : RVVOutOp1BuiltinSet<NAME, "fd",
[["vv", "v", "vvv"],
["vf", "v", "vve"]]>;
@@ -511,9 +529,15 @@ multiclass RVVFloatingBinBuiltinSet {
defm "" : RVVOutOp1BuiltinSet<NAME, "x",
[["vv", "v", "vvv"],
["vf", "v", "vve"]]>;
+ if HasBF then {
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVOutOp1BuiltinSet<NAME, "y",
+ [["vv", "v", "vvv"],
+ ["vf", "v", "vve"]]>;
+ }
}
-multiclass RVVFloatingBinBuiltinSetRoundingMode {
+multiclass RVVFloatingBinBuiltinSetRoundingMode<bit HasBF = 0> {
defm "" : RVVOutOp1BuiltinSet<NAME, "fd",
[["vv", "v", "vvvu"],
["vf", "v", "vveu"]]>;
@@ -521,22 +545,38 @@ multiclass RVVFloatingBinBuiltinSetRoundingMode {
defm "" : RVVOutOp1BuiltinSet<NAME, "x",
[["vv", "v", "vvvu"],
["vf", "v", "vveu"]]>;
+ if HasBF then {
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVOutOp1BuiltinSet<NAME, "y",
+ [["vv", "v", "vvvu"],
+ ["vf", "v", "vveu"]]>;
+ }
}
-multiclass RVVFloatingBinVFBuiltinSet {
+multiclass RVVFloatingBinVFBuiltinSet<bit HasBF = 0> {
defm "" : RVVOutOp1BuiltinSet<NAME, "fd",
[["vf", "v", "vve"]]>;
let RequiredFeatures = ["zvfh"] in
defm "" : RVVOutOp1BuiltinSet<NAME, "x",
[["vf", "v", "vve"]]>;
+ if HasBF then {
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVOutOp1BuiltinSet<NAME, "y",
+ [["vf", "v", "vve"]]>;
+ }
}
-multiclass RVVFloatingBinVFBuiltinSetRoundingMode {
+multiclass RVVFloatingBinVFBuiltinSetRoundingMode<bit HasBF = 0> {
defm "" : RVVOutOp1BuiltinSet<NAME, "fd",
[["vf", "v", "vveu"]]>;
let RequiredFeatures = ["zvfh"] in
defm "" : RVVOutOp1BuiltinSet<NAME, "x",
[["vf", "v", "vveu"]]>;
+ if HasBF then {
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVOutOp1BuiltinSet<NAME, "y",
+ [["vf", "v", "vveu"]]>;
+ }
}
multiclass RVVFloatingMaskOutBuiltinSet {
@@ -547,6 +587,10 @@ multiclass RVVFloatingMaskOutBuiltinSet {
defm "" : RVVOp0Op1BuiltinSet<NAME, "x",
[["vv", "vm", "mvv"],
["vf", "vm", "mve"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVOp0Op1BuiltinSet<NAME, "y",
+ [["vv", "vm", "mvv"],
+ ["vf", "vm", "mve"]]>;
}
multiclass RVVFloatingMaskOutVFBuiltinSet
@@ -748,6 +792,10 @@ multiclass RVVFloatingWidenBinBuiltinSet {
defm "" : RVVWidenBuiltinSet<NAME, "x",
[["vv", "w", "wvv"],
["vf", "w", "wve"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVWidenBuiltinSet<NAME, "y",
+ [["vv", "vw", "wvv"],
+ ["vf", "vw", "wve"]]>;
}
multiclass RVVFloatingWidenBinBuiltinSetRoundingMode {
@@ -758,6 +806,10 @@ multiclass RVVFloatingWidenBinBuiltinSetRoundingMode {
defm "" : RVVWidenBuiltinSet<NAME, "x",
[["vv", "w", "wvvu"],
["vf", "w", "wveu"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVWidenBuiltinSet<NAME, "y",
+ [["vv", "vw", "wvvu"],
+ ["vf", "vw", "wveu"]]>;
}
multiclass RVVFloatingWidenOp0BinBuiltinSet {
@@ -768,6 +820,10 @@ multiclass RVVFloatingWidenOp0BinBuiltinSet {
defm "" : RVVWidenWOp0BuiltinSet<NAME # "_w", "x",
[["wv", "w", "wwv"],
["wf", "w", "wwe"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVWidenWOp0BuiltinSet<NAME # "_w", "y",
+ [["wv", "vw", "wwv"],
+ ["wf", "ew", "wwe"]]>;
}
multiclass RVVFloatingWidenOp0BinBuiltinSetRoundingMode {
@@ -778,4 +834,8 @@ multiclass RVVFloatingWidenOp0BinBuiltinSetRoundingMode {
defm "" : RVVWidenWOp0BuiltinSet<NAME # "_w", "x",
[["wv", "w", "wwvu"],
["wf", "w", "wweu"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVWidenWOp0BuiltinSet<NAME # "_w", "y",
+ [["wv", "vw", "wwvu"],
+ ["wf", "ew", "wweu"]]>;
}
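
The ``vf`` prototype entries additionally produce scalar-operand forms; a sketch under the same naming assumption as above:

    #include <riscv_vector.h>

    vbfloat16m1_t sub_scalar(vbfloat16m1_t v, __bf16 s, size_t vl) {
      return __riscv_vfsub_vf_bf16m1(v, s, vl); // HasBF=1 opts vfsub into bf16
    }
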
diff --git a/clang/include/clang/Format/Format.h b/clang/include/clang/Format/Format.h
index 3df5b92..2852c4a 100644
--- a/clang/include/clang/Format/Format.h
+++ b/clang/include/clang/Format/Format.h
@@ -94,7 +94,7 @@ struct FormatStyle {
///
/// \note
/// This currently only applies to braced initializer lists (when
- /// ``Cpp11BracedListStyle`` is ``true``) and parentheses.
+ /// ``Cpp11BracedListStyle`` is not ``Block``) and parentheses.
/// \endnote
BAS_BlockIndent,
};
@@ -2555,29 +2555,67 @@ struct FormatStyle {
/// \version 3.7
unsigned ContinuationIndentWidth;
- /// If ``true``, format braced lists as best suited for C++11 braced
- /// lists.
- ///
- /// Important differences:
- ///
- /// * No spaces inside the braced list.
- /// * No line break before the closing brace.
- /// * Indentation with the continuation indent, not with the block indent.
- ///
- /// Fundamentally, C++11 braced lists are formatted exactly like function
- /// calls would be formatted in their place. If the braced list follows a name
- /// (e.g. a type or variable name), clang-format formats as if the ``{}`` were
- /// the parentheses of a function call with that name. If there is no name,
- /// a zero-length name is assumed.
- /// \code
- /// true: false:
- /// vector<int> x{1, 2, 3, 4}; vs. vector<int> x{ 1, 2, 3, 4 };
- /// vector<T> x{{}, {}, {}, {}}; vector<T> x{ {}, {}, {}, {} };
- /// f(MyMap[{composite, key}]); f(MyMap[{ composite, key }]);
- /// new int[3]{1, 2, 3}; new int[3]{ 1, 2, 3 };
- /// \endcode
+ /// Different ways to handle braced lists.
+ enum BracedListStyle : int8_t {
+ /// Best suited for pre-C++11 braced lists.
+ ///
+ /// * Spaces inside the braced list.
+ /// * Line break before the closing brace.
+ /// * Indentation with the block indent.
+ ///
+ /// \code
+ /// vector<int> x{ 1, 2, 3, 4 };
+ /// vector<T> x{ {}, {}, {}, {} };
+ /// f(MyMap[{ composite, key }]);
+ /// new int[3]{ 1, 2, 3 };
+ /// Type name{ // Comment
+ /// value
+ /// };
+ /// \endcode
+ BLS_Block,
+ /// Best suited for C++11 braced lists.
+ ///
+ /// * No spaces inside the braced list.
+ /// * No line break before the closing brace.
+ /// * Indentation with the continuation indent.
+ ///
+ /// Fundamentally, C++11 braced lists are formatted exactly like function
+ /// calls would be formatted in their place. If the braced list follows a
+ /// name (e.g. a type or variable name), clang-format formats as if the
+ /// ``{}`` were the parentheses of a function call with that name. If there
+ /// is no name, a zero-length name is assumed.
+ /// \code
+ /// vector<int> x{1, 2, 3, 4};
+ /// vector<T> x{{}, {}, {}, {}};
+ /// f(MyMap[{composite, key}]);
+ /// new int[3]{1, 2, 3};
+ /// Type name{ // Comment
+ /// value};
+ /// \endcode
+ BLS_FunctionCall,
+ /// Same as ``FunctionCall``, except for the handling of a comment at the
+ /// beginning of the list: everything that follows is aligned with the comment.
+ ///
+ /// * No spaces inside the braced list. (Even for a comment at the first
+ /// position.)
+ /// * No line break before the closing brace.
+ /// * Indentation with the continuation indent, except when followed by a
+ /// line comment, then it uses the block indent.
+ ///
+ /// \code
+ /// vector<int> x{1, 2, 3, 4};
+ /// vector<T> x{{}, {}, {}, {}};
+ /// f(MyMap[{composite, key}]);
+ /// new int[3]{1, 2, 3};
+ /// Type name{// Comment
+ /// value};
+ /// \endcode
+ BLS_AlignFirstComment,
+ };
+
+ /// The style for handling braced lists.
/// \version 3.4
- bool Cpp11BracedListStyle;
+ BracedListStyle Cpp11BracedListStyle;
/// This option is **deprecated**. See ``DeriveLF`` and ``DeriveCRLF`` of
/// ``LineEnding``.
@@ -4933,7 +4971,7 @@ struct FormatStyle {
/// Specifies when to insert a space in empty braces.
/// \note
/// This option doesn't apply to initializer braces if
- /// ``Cpp11BracedListStyle`` is set to ``true``.
+ /// ``Cpp11BracedListStyle`` is not ``Block``.
/// \endnote
/// \version 22
SpaceInEmptyBracesStyle SpaceInEmptyBraces;
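
Condensing the documentation examples above, the three values lay out the same initializer as follows (ordinary compilable C++; the comments name the option value):

    struct Type { int v; };

    Type a{ // Block
      1
    };
    Type b{ // FunctionCall
        1};
    Type c{// AlignFirstComment
           1};
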
diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h
index e3faaac..cb21335 100644
--- a/clang/include/clang/Sema/Sema.h
+++ b/clang/include/clang/Sema/Sema.h
@@ -3961,6 +3961,13 @@ public:
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = {});
+private:
+ // Perform a check on an AsmLabel to verify its consistency and emit
+ // diagnostics in case of an error.
+ void CheckAsmLabel(Scope *S, Expr *AsmLabelExpr, StorageClass SC,
+ TypeSourceInfo *TInfo, VarDecl *);
+
+public:
/// Perform semantic checking on a newly-created variable
/// declaration.
///
@@ -13000,6 +13007,37 @@ public:
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
+ using InstantiatingSpecializationsKey = llvm::PointerIntPair<Decl *, 2>;
+
+ struct RecursiveInstGuard {
+ enum class Kind {
+ Template,
+ DefaultArgument,
+ ExceptionSpec,
+ };
+
+ RecursiveInstGuard(Sema &S, Decl *D, Kind Kind)
+ : S(S), Key(D->getCanonicalDecl(), unsigned(Kind)) {
+ auto [_, Created] = S.InstantiatingSpecializations.insert(Key);
+ if (!Created)
+ Key = {};
+ }
+
+ ~RecursiveInstGuard() {
+ if (Key.getOpaqueValue()) {
+ [[maybe_unused]] bool Erased =
+ S.InstantiatingSpecializations.erase(Key);
+ assert(Erased);
+ }
+ }
+
+ operator bool() const { return Key.getOpaqueValue() == nullptr; }
+
+ private:
+ Sema &S;
+ Sema::InstantiatingSpecializationsKey Key;
+ };
+
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
@@ -13361,14 +13399,9 @@ public:
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
- /// Determine whether we are already instantiating this
- /// specialization in some surrounding active instantiation.
- bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
-
private:
Sema &SemaRef;
bool Invalid;
- bool AlreadyInstantiating;
InstantiatingTemplate(Sema &SemaRef,
CodeSynthesisContext::SynthesisKind Kind,
@@ -13498,7 +13531,7 @@ public:
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
- llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
+ llvm::DenseSet<InstantiatingSpecializationsKey> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
@@ -13773,6 +13806,14 @@ public:
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK, bool Complain = true);
+private:
+ bool InstantiateClassImpl(SourceLocation PointOfInstantiation,
+ CXXRecordDecl *Instantiation,
+ CXXRecordDecl *Pattern,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ TemplateSpecializationKind TSK, bool Complain);
+
+public:
/// Instantiate the definition of an enum from a given pattern.
///
/// \param PointOfInstantiation The point of instantiation within the
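
``RecursiveInstGuard`` replaces the old ``isAlreadyInstantiating()`` query with an RAII key in ``InstantiatingSpecializations``. A sketch of the intended call-site shape, assuming a ``Sema &SemaRef`` and the ``Decl *Instantiation`` being instantiated (the real call sites are in the SemaTemplateInstantiate files):

    Sema::RecursiveInstGuard Guard(SemaRef, Instantiation,
                                   Sema::RecursiveInstGuard::Kind::Template);
    if (Guard) {
      // The key was already present: this specialization is already being
      // instantiated in a surrounding context, so bail out instead of
      // recursing.
      return true;
    }
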
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h
index c233ca1..4aee165 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h
@@ -211,6 +211,16 @@ protected:
getExtraInvalidatedValues(ValueList &Values,
RegionAndSymbolInvalidationTraits *ETraits) const {}
+ /// A state for looking up relevant Environment entries (arguments, return
+ /// value), dynamic type information and similar "stable" things.
+ /// WARNING: During the evaluation of a function call, several state
+ /// transitions happen, so this state can become partially obsolete!
+ ///
+ /// TODO: Instead of storing a complete state object in the CallEvent, only
+ /// store the relevant parts (such as argument/return SVals etc.) that aren't
+ /// allowed to become obsolete until the end of the call evaluation.
+ ProgramStateRef getState() const { return State; }
+
public:
CallEvent &operator=(const CallEvent &) = delete;
virtual ~CallEvent() = default;
@@ -231,8 +241,11 @@ public:
}
void setForeign(bool B) const { Foreign = B; }
- /// The state in which the call is being evaluated.
- const ProgramStateRef &getState() const { return State; }
+ /// NOTE: There are plans for refactoring that would eliminate this method.
+ /// Prefer to use CheckerContext::getASTContext if possible!
+ const ASTContext &getASTContext() const {
+ return getState()->getStateManager().getContext();
+ }
/// The context in which the call is being evaluated.
const LocationContext *getLocationContext() const { return LCtx; }
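
For checker code the migration is mechanical; a sketch of the callback body (fragment only, inside the usual checker class):

    void checkPreCall(const CallEvent &Call, CheckerContext &C) {
      // Previously: Call.getState()->getStateManager().getContext().
      const ASTContext &FromCall = Call.getASTContext();
      const ASTContext &Preferred = C.getASTContext(); // preferred form
      (void)FromCall;
      (void)Preferred;
    }
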
diff --git a/clang/lib/AST/ASTConcept.cpp b/clang/lib/AST/ASTConcept.cpp
index fd12bc4..9ea104c 100644
--- a/clang/lib/AST/ASTConcept.cpp
+++ b/clang/lib/AST/ASTConcept.cpp
@@ -86,7 +86,7 @@ void ConstraintSatisfaction::Profile(llvm::FoldingSetNodeID &ID,
ID.AddPointer(ConstraintOwner);
ID.AddInteger(TemplateArgs.size());
for (auto &Arg : TemplateArgs)
- Arg.Profile(ID, C);
+ C.getCanonicalTemplateArgument(Arg).Profile(ID, C);
}
ConceptReference *
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 5838cf8..0cb4910 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -3621,6 +3621,15 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
return LHS.isSigned() ? LHS.ssub_sat(RHS) : LHS.usub_sat(RHS);
});
+ case clang::X86::BI__builtin_ia32_pmulhrsw128:
+ case clang::X86::BI__builtin_ia32_pmulhrsw256:
+ case clang::X86::BI__builtin_ia32_pmulhrsw512:
+ return interp__builtin_elementwise_int_binop(
+ S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
+ return (llvm::APIntOps::mulsExtended(LHS, RHS).ashr(14) + 1)
+ .extractBits(16, 1);
+ });
+
case clang::X86::BI__builtin_ia32_pavgb128:
case clang::X86::BI__builtin_ia32_pavgw128:
case clang::X86::BI__builtin_ia32_pavgb256:
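
The lambda mirrors PMULHRSW lane semantics: widen, shift by 14, add a rounding bit, and keep bits 16..1. A scalar model of one lane in plain C++:

    constexpr short mulhrs(short a, short b) {
      int p = int(a) * int(b);            // widened 32-bit product
      return short(((p >> 14) + 1) >> 1); // round, then drop the rounding bit
    }
    static_assert(mulhrs(16384, 16384) == 8192); // 0x4000 * 0x4000 -> 0x2000
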
diff --git a/clang/lib/AST/ByteCode/Program.cpp b/clang/lib/AST/ByteCode/Program.cpp
index e653782..e0b2852 100644
--- a/clang/lib/AST/ByteCode/Program.cpp
+++ b/clang/lib/AST/ByteCode/Program.cpp
@@ -226,7 +226,10 @@ UnsignedOrNone Program::createGlobal(const ValueDecl *VD, const Expr *Init) {
Globals[PIdx] = NewGlobal;
// All pointers pointing to the previous extern decl now point to the
// new decl.
- RedeclBlock->movePointersTo(NewGlobal->block());
+ // A previous iteration might've already fixed up the pointers for this
+ // global.
+ if (RedeclBlock != NewGlobal->block())
+ RedeclBlock->movePointersTo(NewGlobal->block());
}
}
PIdx = *Idx;
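
A reduced sketch of the pattern this guards against re-processing (assumed, in the spirit of the updated ByteCode/extern.cpp test):

    extern const int g;
    extern const int g; // a second redeclaration reaches createGlobal() again
    const int g = 1;
    static_assert(g == 1);
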
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 16141b2..e308c17 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -11819,6 +11819,14 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
case clang::X86::BI__builtin_ia32_pavgw512:
return EvaluateBinOpExpr(llvm::APIntOps::avgCeilU);
+ case clang::X86::BI__builtin_ia32_pmulhrsw128:
+ case clang::X86::BI__builtin_ia32_pmulhrsw256:
+ case clang::X86::BI__builtin_ia32_pmulhrsw512:
+ return EvaluateBinOpExpr([](const APSInt &LHS, const APSInt &RHS) {
+ return (llvm::APIntOps::mulsExtended(LHS, RHS).ashr(14) + 1)
+ .extractBits(16, 1);
+ });
+
case clang::X86::BI__builtin_ia32_pmaddubsw128:
case clang::X86::BI__builtin_ia32_pmaddubsw256:
case clang::X86::BI__builtin_ia32_pmaddubsw512:
diff --git a/clang/lib/Analysis/ExprMutationAnalyzer.cpp b/clang/lib/Analysis/ExprMutationAnalyzer.cpp
index 1e376da..75b17c54 100644
--- a/clang/lib/Analysis/ExprMutationAnalyzer.cpp
+++ b/clang/lib/Analysis/ExprMutationAnalyzer.cpp
@@ -140,7 +140,8 @@ class ExprPointeeResolve {
// explicit cast will be checked in `findPointeeToNonConst`
const CastKind kind = ICE->getCastKind();
if (kind == CK_LValueToRValue || kind == CK_DerivedToBase ||
- kind == CK_UncheckedDerivedToBase)
+ kind == CK_UncheckedDerivedToBase ||
+ (kind == CK_NoOp && (ICE->getType() == ICE->getSubExpr()->getType())))
return resolveExpr(ICE->getSubExpr());
return false;
}
@@ -788,13 +789,16 @@ ExprMutationAnalyzer::Analyzer::findPointeeToNonConst(const Expr *Exp) {
// FIXME: false positive if the pointee does not change in lambda
const auto CaptureNoConst = lambdaExpr(hasCaptureInit(Exp));
- const auto Matches =
- match(stmt(anyOf(forEachDescendant(
- stmt(anyOf(AssignToNonConst, PassAsNonConstArg,
- CastToNonConst, CaptureNoConst))
- .bind("stmt")),
- forEachDescendant(InitToNonConst))),
- Stm, Context);
+ const auto ReturnNoConst =
+ returnStmt(hasReturnValue(canResolveToExprPointee(Exp)));
+
+ const auto Matches = match(
+ stmt(anyOf(forEachDescendant(
+ stmt(anyOf(AssignToNonConst, PassAsNonConstArg,
+ CastToNonConst, CaptureNoConst, ReturnNoConst))
+ .bind("stmt")),
+ forEachDescendant(InitToNonConst))),
+ Stm, Context);
return selectFirst<Stmt>("stmt", Matches);
}
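
With the added ``ReturnNoConst`` matcher, returning the pointee through a non-const pointer or reference now counts as a potential mutation. A sketch of code it is meant to flag (example assumed):

    int *passthrough(int *p) {
      return p; // the caller may now mutate *p via the returned pointer
    }
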
diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
index d95dab3..5bf1816 100644
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -374,10 +374,9 @@ bool AtomicInfo::emitMemSetZeroIfNecessary() const {
}
static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
- Address Dest, Address Ptr,
- Address Val1, Address Val2,
- uint64_t Size,
- llvm::AtomicOrdering SuccessOrder,
+ Address Dest, Address Ptr, Address Val1,
+ Address Val2, Address ExpectedResult,
+ uint64_t Size, llvm::AtomicOrdering SuccessOrder,
llvm::AtomicOrdering FailureOrder,
llvm::SyncScope::ID Scope) {
// Note that cmpxchg doesn't support weak cmpxchg, at least at the moment.
@@ -411,8 +410,30 @@ static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
CGF.Builder.SetInsertPoint(StoreExpectedBB);
// Update the memory at Expected with Old's value.
- auto *I = CGF.Builder.CreateStore(Old, Val1);
- CGF.addInstToCurrentSourceAtom(I, Old);
+ llvm::Type *ExpectedType = ExpectedResult.getElementType();
+ const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
+ uint64_t ExpectedSizeInBytes = DL.getTypeStoreSize(ExpectedType);
+
+ if (ExpectedSizeInBytes == Size) {
+ // Sizes match: store directly
+ auto *I = CGF.Builder.CreateStore(Old, ExpectedResult);
+ CGF.addInstToCurrentSourceAtom(I, Old);
+ } else {
+ // Store only the first ExpectedSizeInBytes bytes of Old.
+ llvm::Type *OldType = Old->getType();
+
+ // Allocate temporary storage for Old value
+ Address OldTmp =
+ CGF.CreateTempAlloca(OldType, Ptr.getAlignment(), "old.tmp");
+
+ // Store Old into this temporary
+ auto *I = CGF.Builder.CreateStore(Old, OldTmp);
+ CGF.addInstToCurrentSourceAtom(I, Old);
+
+ // Perform memcpy for first ExpectedSizeInBytes bytes
+ CGF.Builder.CreateMemCpy(ExpectedResult, OldTmp, ExpectedSizeInBytes,
+ /*isVolatile=*/false);
+ }
// Finally, branch to the exit point.
CGF.Builder.CreateBr(ContinueBB);
@@ -425,13 +446,11 @@ static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically known)
/// FailureOrder.
-static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
- bool IsWeak, Address Dest, Address Ptr,
- Address Val1, Address Val2,
- llvm::Value *FailureOrderVal,
- uint64_t Size,
- llvm::AtomicOrdering SuccessOrder,
- llvm::SyncScope::ID Scope) {
+static void emitAtomicCmpXchgFailureSet(
+ CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak, Address Dest, Address Ptr,
+ Address Val1, Address Val2, Address ExpectedResult,
+ llvm::Value *FailureOrderVal, uint64_t Size,
+ llvm::AtomicOrdering SuccessOrder, llvm::SyncScope::ID Scope) {
llvm::AtomicOrdering FailureOrder;
if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
auto FOS = FO->getSExtValue();
@@ -458,8 +477,8 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
// success argument". This condition has been lifted and the only
// precondition is 31.7.2.18. Effectively treat this as a DR and skip
// language version checks.
- emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
- FailureOrder, Scope);
+ emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult,
+ Size, SuccessOrder, FailureOrder, Scope);
return;
}
@@ -483,18 +502,19 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
// Emit all the different atomics
CGF.Builder.SetInsertPoint(MonotonicBB);
- emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
- Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
+ emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult, Size,
+ SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
CGF.Builder.CreateBr(ContBB);
CGF.Builder.SetInsertPoint(AcquireBB);
- emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
- llvm::AtomicOrdering::Acquire, Scope);
+ emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult, Size,
+ SuccessOrder, llvm::AtomicOrdering::Acquire, Scope);
CGF.Builder.CreateBr(ContBB);
CGF.Builder.SetInsertPoint(SeqCstBB);
- emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
- llvm::AtomicOrdering::SequentiallyConsistent, Scope);
+ emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult, Size,
+ SuccessOrder, llvm::AtomicOrdering::SequentiallyConsistent,
+ Scope);
CGF.Builder.CreateBr(ContBB);
CGF.Builder.SetInsertPoint(ContBB);
@@ -538,8 +558,9 @@ static llvm::Value *EmitPostAtomicMinMax(CGBuilderTy &Builder,
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
Address Ptr, Address Val1, Address Val2,
- llvm::Value *IsWeak, llvm::Value *FailureOrder,
- uint64_t Size, llvm::AtomicOrdering Order,
+ Address ExpectedResult, llvm::Value *IsWeak,
+ llvm::Value *FailureOrder, uint64_t Size,
+ llvm::AtomicOrdering Order,
llvm::SyncScope::ID Scope) {
llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
bool PostOpMinMax = false;
@@ -554,13 +575,15 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
- FailureOrder, Size, Order, Scope);
+ ExpectedResult, FailureOrder, Size, Order,
+ Scope);
return;
case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
- FailureOrder, Size, Order, Scope);
+ ExpectedResult, FailureOrder, Size, Order,
+ Scope);
return;
case AtomicExpr::AO__atomic_compare_exchange:
case AtomicExpr::AO__atomic_compare_exchange_n:
@@ -568,7 +591,8 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
case AtomicExpr::AO__scoped_atomic_compare_exchange_n: {
if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
- Val1, Val2, FailureOrder, Size, Order, Scope);
+ Val1, Val2, ExpectedResult, FailureOrder,
+ Size, Order, Scope);
} else {
// Create all the relevant BB's
llvm::BasicBlock *StrongBB =
@@ -582,12 +606,14 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
CGF.Builder.SetInsertPoint(StrongBB);
emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
- FailureOrder, Size, Order, Scope);
+ ExpectedResult, FailureOrder, Size, Order,
+ Scope);
CGF.Builder.CreateBr(ContBB);
CGF.Builder.SetInsertPoint(WeakBB);
emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
- FailureOrder, Size, Order, Scope);
+ ExpectedResult, FailureOrder, Size, Order,
+ Scope);
CGF.Builder.CreateBr(ContBB);
CGF.Builder.SetInsertPoint(ContBB);
@@ -797,9 +823,9 @@ EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
Address Ptr, Address Val1, Address Val2,
- llvm::Value *IsWeak, llvm::Value *FailureOrder,
- uint64_t Size, llvm::AtomicOrdering Order,
- llvm::Value *Scope) {
+ Address OriginalVal1, llvm::Value *IsWeak,
+ llvm::Value *FailureOrder, uint64_t Size,
+ llvm::AtomicOrdering Order, llvm::Value *Scope) {
auto ScopeModel = Expr->getScopeModel();
// LLVM atomic instructions always have sync scope. If clang atomic
@@ -816,8 +842,8 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
Order, CGF.getLLVMContext());
else
SS = llvm::SyncScope::System;
- EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
- Order, SS);
+ EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ FailureOrder, Size, Order, SS);
return;
}
@@ -826,8 +852,8 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()),
Order, CGF.CGM.getLLVMContext());
- EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
- Order, SCID);
+ EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ FailureOrder, Size, Order, SCID);
return;
}
@@ -852,12 +878,11 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
SI->addCase(Builder.getInt32(S), B);
Builder.SetInsertPoint(B);
- EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
- Order,
- CGF.getTargetHooks().getLLVMSyncScopeID(CGF.CGM.getLangOpts(),
- ScopeModel->map(S),
- Order,
- CGF.getLLVMContext()));
+ EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ FailureOrder, Size, Order,
+ CGF.getTargetHooks().getLLVMSyncScopeID(
+ CGF.CGM.getLangOpts(), ScopeModel->map(S), Order,
+ CGF.getLLVMContext()));
Builder.CreateBr(ContBB);
}
@@ -1058,6 +1083,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
AtomicInfo Atomics(*this, AtomicVal);
+ Address OriginalVal1 = Val1;
if (ShouldCastToIntPtrTy) {
Ptr = Atomics.castToAtomicIntPointer(Ptr);
if (Val1.isValid())
@@ -1301,30 +1327,32 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
if (llvm::isValidAtomicOrderingCABI(ord))
switch ((llvm::AtomicOrderingCABI)ord) {
case llvm::AtomicOrderingCABI::relaxed:
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::Monotonic, Scope);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ OrderFail, Size, llvm::AtomicOrdering::Monotonic, Scope);
break;
case llvm::AtomicOrderingCABI::consume:
case llvm::AtomicOrderingCABI::acquire:
if (IsStore)
break; // Avoid crashing on code with undefined behavior
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::Acquire, Scope);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ OrderFail, Size, llvm::AtomicOrdering::Acquire, Scope);
break;
case llvm::AtomicOrderingCABI::release:
if (IsLoad)
break; // Avoid crashing on code with undefined behavior
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::Release, Scope);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ OrderFail, Size, llvm::AtomicOrdering::Release, Scope);
break;
case llvm::AtomicOrderingCABI::acq_rel:
if (IsLoad || IsStore)
break; // Avoid crashing on code with undefined behavior
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::AcquireRelease, Scope);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ OrderFail, Size, llvm::AtomicOrdering::AcquireRelease,
+ Scope);
break;
case llvm::AtomicOrderingCABI::seq_cst:
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ OrderFail, Size,
llvm::AtomicOrdering::SequentiallyConsistent, Scope);
break;
}
@@ -1360,13 +1388,13 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
// Emit all the different atomics
Builder.SetInsertPoint(MonotonicBB);
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::Monotonic, Scope);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak, OrderFail,
+ Size, llvm::AtomicOrdering::Monotonic, Scope);
Builder.CreateBr(ContBB);
if (!IsStore) {
Builder.SetInsertPoint(AcquireBB);
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::Acquire, Scope);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ OrderFail, Size, llvm::AtomicOrdering::Acquire, Scope);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
AcquireBB);
@@ -1375,23 +1403,23 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
}
if (!IsLoad) {
Builder.SetInsertPoint(ReleaseBB);
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::Release, Scope);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ OrderFail, Size, llvm::AtomicOrdering::Release, Scope);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
ReleaseBB);
}
if (!IsLoad && !IsStore) {
Builder.SetInsertPoint(AcqRelBB);
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::AcquireRelease, Scope);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ OrderFail, Size, llvm::AtomicOrdering::AcquireRelease, Scope);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
AcqRelBB);
}
Builder.SetInsertPoint(SeqCstBB);
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::SequentiallyConsistent, Scope);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak, OrderFail,
+ Size, llvm::AtomicOrdering::SequentiallyConsistent, Scope);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
SeqCstBB);
@@ -1417,6 +1445,11 @@ Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
if (SourceSizeInBits != AtomicSizeInBits) {
Address Tmp = CreateTempAlloca();
+ CGF.Builder.CreateMemSet(
+ Tmp.emitRawPointer(CGF), llvm::ConstantInt::get(CGF.Int8Ty, 0),
+ CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
+ Tmp.getAlignment().getAsAlign());
+
CGF.Builder.CreateMemCpy(Tmp, Addr,
std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
Addr = Tmp;
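
Both changes deal with atomics whose in-memory width differs from the value's store size. A plausible trigger, assuming x86-64 where ``long double`` stores 10 value bytes in 16 bytes of storage:

    long double v = 1.0L, expected = 1.0L, desired = 2.0L;
    // The compare-exchange is widened to the full 16-byte slot; on failure,
    // only the value bytes of 'expected' should be rewritten.
    __atomic_compare_exchange(&v, &expected, &desired,
                              /*weak=*/false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
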
diff --git a/clang/lib/Format/BreakableToken.cpp b/clang/lib/Format/BreakableToken.cpp
index 29db200..994a427 100644
--- a/clang/lib/Format/BreakableToken.cpp
+++ b/clang/lib/Format/BreakableToken.cpp
@@ -306,8 +306,10 @@ BreakableStringLiteralUsingOperators::BreakableStringLiteralUsingOperators(
// In Verilog, all strings are quoted by double quotes, joined by commas,
// and wrapped in braces. The comma is always before the newline.
assert(QuoteStyle == DoubleQuotes);
- LeftBraceQuote = Style.Cpp11BracedListStyle ? "{\"" : "{ \"";
- RightBraceQuote = Style.Cpp11BracedListStyle ? "\"}" : "\" }";
+ LeftBraceQuote =
+ Style.Cpp11BracedListStyle != FormatStyle::BLS_Block ? "{\"" : "{ \"";
+ RightBraceQuote =
+ Style.Cpp11BracedListStyle != FormatStyle::BLS_Block ? "\"}" : "\" }";
Postfix = "\",";
Prefix = "\"";
} else {
diff --git a/clang/lib/Format/ContinuationIndenter.cpp b/clang/lib/Format/ContinuationIndenter.cpp
index cd4c1aa..37c10c6 100644
--- a/clang/lib/Format/ContinuationIndenter.cpp
+++ b/clang/lib/Format/ContinuationIndenter.cpp
@@ -411,7 +411,7 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
}
if (CurrentState.BreakBeforeClosingBrace &&
(Current.closesBlockOrBlockTypeList(Style) ||
- (Current.is(tok::r_brace) &&
+ (Current.is(tok::r_brace) && Current.MatchingParen &&
Current.isBlockIndentedInitRBrace(Style)))) {
return true;
}
@@ -433,7 +433,7 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
}
if ((startsNextParameter(Current, Style) || Previous.is(tok::semi) ||
(Previous.is(TT_TemplateCloser) && Current.is(TT_StartOfName) &&
- State.Line->First->isNot(TT_AttributeSquare) && Style.isCpp() &&
+ State.Line->First->isNot(TT_AttributeLSquare) && Style.isCpp() &&
// FIXME: This is a temporary workaround for the case where clang-format
// sets BreakBeforeParameter to avoid bin packing and this creates a
// completely unnecessary line break after a template type that isn't
@@ -833,7 +833,7 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
auto IsOpeningBracket = [&](const FormatToken &Tok) {
auto IsStartOfBracedList = [&]() {
return Tok.is(tok::l_brace) && Tok.isNot(BK_Block) &&
- Style.Cpp11BracedListStyle;
+ Style.Cpp11BracedListStyle != FormatStyle::BLS_Block;
};
if (Tok.isNoneOf(tok::l_paren, TT_TemplateOpener, tok::l_square) &&
!IsStartOfBracedList()) {
@@ -925,7 +925,12 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
TT_TableGenDAGArgOpenerToBreak) &&
!(Current.MacroParent && Previous.MacroParent) &&
(Current.isNot(TT_LineComment) ||
- Previous.isOneOf(BK_BracedInit, TT_VerilogMultiLineListLParen)) &&
+ (Previous.is(BK_BracedInit) &&
+ (Style.Cpp11BracedListStyle != FormatStyle::BLS_FunctionCall ||
+ !Previous.Previous ||
+ Previous.Previous->isNoneOf(tok::identifier, tok::l_paren,
+ BK_BracedInit))) ||
+ Previous.is(TT_VerilogMultiLineListLParen)) &&
!IsInTemplateString(Current)) {
CurrentState.Indent = State.Column + Spaces;
CurrentState.IsAligned = true;
@@ -1369,7 +1374,8 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
}
if (Current.is(TT_LambdaArrow) &&
Previous.isOneOf(tok::kw_noexcept, tok::kw_mutable, tok::kw_constexpr,
- tok::kw_consteval, tok::kw_static, TT_AttributeSquare)) {
+ tok::kw_consteval, tok::kw_static,
+ TT_AttributeRSquare)) {
return ContinuationIndent;
}
if ((Current.isOneOf(tok::r_brace, tok::r_square) ||
@@ -1494,9 +1500,10 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
Current.isNot(tok::l_paren) &&
!Current.endsSequence(TT_StartOfName, TT_AttributeMacro,
TT_PointerOrReference)) ||
- PreviousNonComment->isOneOf(
- TT_AttributeRParen, TT_AttributeSquare, TT_FunctionAnnotationRParen,
- TT_JavaAnnotation, TT_LeadingJavaAnnotation))) ||
+ PreviousNonComment->isOneOf(TT_AttributeRParen, TT_AttributeRSquare,
+ TT_FunctionAnnotationRParen,
+ TT_JavaAnnotation,
+ TT_LeadingJavaAnnotation))) ||
(!Style.IndentWrappedFunctionNames &&
NextNonComment->isOneOf(tok::kw_operator, TT_FunctionDeclarationName))) {
return std::max(CurrentState.LastSpace, CurrentState.Indent);
diff --git a/clang/lib/Format/DefinitionBlockSeparator.cpp b/clang/lib/Format/DefinitionBlockSeparator.cpp
index 3f4ce5f..855f2ef 100644
--- a/clang/lib/Format/DefinitionBlockSeparator.cpp
+++ b/clang/lib/Format/DefinitionBlockSeparator.cpp
@@ -169,7 +169,7 @@ void DefinitionBlockSeparator::separateBlocks(
}
}
- if (Style.isCSharp() && OperateLine->First->is(TT_AttributeSquare))
+ if (Style.isCSharp() && OperateLine->First->is(TT_AttributeLSquare))
return true;
return false;
};
diff --git a/clang/lib/Format/Format.cpp b/clang/lib/Format/Format.cpp
index 093e88f..edd126c 100644
--- a/clang/lib/Format/Format.cpp
+++ b/clang/lib/Format/Format.cpp
@@ -304,6 +304,18 @@ struct ScalarEnumerationTraits<FormatStyle::BreakTemplateDeclarationsStyle> {
}
};
+template <> struct ScalarEnumerationTraits<FormatStyle::BracedListStyle> {
+ static void enumeration(IO &IO, FormatStyle::BracedListStyle &Value) {
+ IO.enumCase(Value, "Block", FormatStyle::BLS_Block);
+ IO.enumCase(Value, "FunctionCall", FormatStyle::BLS_FunctionCall);
+ IO.enumCase(Value, "AlignFirstComment", FormatStyle::BLS_AlignFirstComment);
+
+ // For backward compatibility.
+ IO.enumCase(Value, "false", FormatStyle::BLS_Block);
+ IO.enumCase(Value, "true", FormatStyle::BLS_AlignFirstComment);
+ }
+};
+
template <> struct ScalarEnumerationTraits<FormatStyle::DAGArgStyle> {
static void enumeration(IO &IO, FormatStyle::DAGArgStyle &Value) {
IO.enumCase(Value, "DontBreak", FormatStyle::DAS_DontBreak);
@@ -1628,7 +1640,7 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.CompactNamespaces = false;
LLVMStyle.ConstructorInitializerIndentWidth = 4;
LLVMStyle.ContinuationIndentWidth = 4;
- LLVMStyle.Cpp11BracedListStyle = true;
+ LLVMStyle.Cpp11BracedListStyle = FormatStyle::BLS_AlignFirstComment;
LLVMStyle.DerivePointerAlignment = false;
LLVMStyle.DisableFormat = false;
LLVMStyle.EmptyLineAfterAccessModifier = FormatStyle::ELAAMS_Never;
@@ -1904,7 +1916,7 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
// beneficial there. Investigate turning this on once proper string reflow
// has been implemented.
GoogleStyle.BreakStringLiterals = false;
- GoogleStyle.Cpp11BracedListStyle = false;
+ GoogleStyle.Cpp11BracedListStyle = FormatStyle::BLS_Block;
GoogleStyle.SpacesInContainerLiterals = false;
} else if (Language == FormatStyle::LK_ObjC) {
GoogleStyle.AlwaysBreakBeforeMultilineStrings = false;
@@ -2000,7 +2012,7 @@ FormatStyle getMozillaStyle() {
MozillaStyle.BreakTemplateDeclarations = FormatStyle::BTDS_Yes;
MozillaStyle.ConstructorInitializerIndentWidth = 2;
MozillaStyle.ContinuationIndentWidth = 2;
- MozillaStyle.Cpp11BracedListStyle = false;
+ MozillaStyle.Cpp11BracedListStyle = FormatStyle::BLS_Block;
MozillaStyle.FixNamespaceComments = false;
MozillaStyle.IndentCaseLabels = true;
MozillaStyle.ObjCSpaceAfterProperty = true;
@@ -2023,7 +2035,7 @@ FormatStyle getWebKitStyle() {
Style.BreakBeforeBraces = FormatStyle::BS_WebKit;
Style.BreakConstructorInitializers = FormatStyle::BCIS_BeforeComma;
Style.ColumnLimit = 0;
- Style.Cpp11BracedListStyle = false;
+ Style.Cpp11BracedListStyle = FormatStyle::BLS_Block;
Style.FixNamespaceComments = false;
Style.IndentWidth = 4;
Style.NamespaceIndentation = FormatStyle::NI_Inner;
@@ -2043,7 +2055,7 @@ FormatStyle getGNUStyle() {
Style.BreakBeforeBraces = FormatStyle::BS_GNU;
Style.BreakBeforeTernaryOperators = true;
Style.ColumnLimit = 79;
- Style.Cpp11BracedListStyle = false;
+ Style.Cpp11BracedListStyle = FormatStyle::BLS_Block;
Style.FixNamespaceComments = false;
Style.KeepFormFeed = true;
Style.SpaceBeforeParens = FormatStyle::SBPO_Always;
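
For existing configurations, the enumeration keeps the old boolean spellings as aliases, so a pre-existing ``.clang-format`` keeps working unchanged; a sketch:

    # .clang-format (sketch)
    Cpp11BracedListStyle: true   # parsed as AlignFirstComment
    # Cpp11BracedListStyle: false  would be parsed as Block
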
diff --git a/clang/lib/Format/FormatToken.cpp b/clang/lib/Format/FormatToken.cpp
index cb3fc1c..d1c6264 100644
--- a/clang/lib/Format/FormatToken.cpp
+++ b/clang/lib/Format/FormatToken.cpp
@@ -65,12 +65,13 @@ bool FormatToken::isTypeOrIdentifier(const LangOptions &LangOpts) const {
bool FormatToken::isBlockIndentedInitRBrace(const FormatStyle &Style) const {
assert(is(tok::r_brace));
- if (!Style.Cpp11BracedListStyle ||
+ assert(MatchingParen);
+ assert(MatchingParen->is(tok::l_brace));
+ if (Style.Cpp11BracedListStyle == FormatStyle::BLS_Block ||
Style.AlignAfterOpenBracket != FormatStyle::BAS_BlockIndent) {
return false;
}
const auto *LBrace = MatchingParen;
- assert(LBrace && LBrace->is(tok::l_brace));
if (LBrace->is(BK_BracedInit))
return true;
if (LBrace->Previous && LBrace->Previous->is(tok::equal))
@@ -87,7 +88,8 @@ bool FormatToken::opensBlockOrBlockTypeList(const FormatStyle &Style) const {
return is(TT_ArrayInitializerLSquare) || is(TT_ProtoExtensionLSquare) ||
(is(tok::l_brace) &&
(getBlockKind() == BK_Block || is(TT_DictLiteral) ||
- (!Style.Cpp11BracedListStyle && NestingLevel == 0))) ||
+ (Style.Cpp11BracedListStyle == FormatStyle::BLS_Block &&
+ NestingLevel == 0))) ||
(is(tok::less) && Style.isProto());
}
@@ -183,7 +185,8 @@ void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) {
// In C++11 braced list style, we should not format in columns unless they
// have many items (20 or more) or we allow bin-packing of function call
// arguments.
- if (Style.Cpp11BracedListStyle && !Style.BinPackArguments &&
+ if (Style.Cpp11BracedListStyle != FormatStyle::BLS_Block &&
+ !Style.BinPackArguments &&
(Commas.size() < 19 || !Style.BinPackLongBracedList)) {
return;
}
@@ -227,7 +230,7 @@ void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) {
ItemEnd = Token->MatchingParen;
const FormatToken *NonCommentEnd = ItemEnd->getPreviousNonComment();
ItemLengths.push_back(CodePointsBetween(ItemBegin, NonCommentEnd));
- if (Style.Cpp11BracedListStyle &&
+ if (Style.Cpp11BracedListStyle != FormatStyle::BLS_Block &&
!ItemEnd->Previous->isTrailingComment()) {
// In Cpp11 braced list style, the } and possibly other subsequent
// tokens will need to stay on a line with the last element.
diff --git a/clang/lib/Format/FormatToken.h b/clang/lib/Format/FormatToken.h
index f015d27..6f3d24a 100644
--- a/clang/lib/Format/FormatToken.h
+++ b/clang/lib/Format/FormatToken.h
@@ -30,9 +30,10 @@ namespace format {
TYPE(ArraySubscriptLSquare) \
TYPE(AttributeColon) \
TYPE(AttributeLParen) \
+ TYPE(AttributeLSquare) \
TYPE(AttributeMacro) \
TYPE(AttributeRParen) \
- TYPE(AttributeSquare) \
+ TYPE(AttributeRSquare) \
TYPE(BinaryOperator) \
TYPE(BitFieldColon) \
TYPE(BlockComment) \
diff --git a/clang/lib/Format/TokenAnnotator.cpp b/clang/lib/Format/TokenAnnotator.cpp
index 5b784ed..c97a9e8 100644
--- a/clang/lib/Format/TokenAnnotator.cpp
+++ b/clang/lib/Format/TokenAnnotator.cpp
@@ -531,10 +531,6 @@ private:
OpeningParen.Previous->is(TT_LeadingJavaAnnotation)) {
CurrentToken->setType(TT_LeadingJavaAnnotation);
}
- if (OpeningParen.Previous &&
- OpeningParen.Previous->is(TT_AttributeSquare)) {
- CurrentToken->setType(TT_AttributeSquare);
- }
if (!HasMultipleLines)
OpeningParen.setPackingKind(PPK_Inconclusive);
@@ -722,9 +718,11 @@ private:
} else if (InsideInlineASM) {
Left->setType(TT_InlineASMSymbolicNameLSquare);
} else if (IsCpp11AttributeSpecifier) {
- Left->setType(TT_AttributeSquare);
- if (!IsInnerSquare && Left->Previous)
- Left->Previous->EndsCppAttributeGroup = false;
+ if (!IsInnerSquare) {
+ Left->setType(TT_AttributeLSquare);
+ if (Left->Previous)
+ Left->Previous->EndsCppAttributeGroup = false;
+ }
} else if (Style.isJavaScript() && Parent &&
Contexts.back().ContextKind == tok::l_brace &&
Parent->isOneOf(tok::l_brace, tok::comma)) {
@@ -733,7 +731,7 @@ private:
Parent && Parent->isOneOf(tok::l_brace, tok::comma)) {
Left->setType(TT_DesignatedInitializerLSquare);
} else if (IsCSharpAttributeSpecifier) {
- Left->setType(TT_AttributeSquare);
+ Left->setType(TT_AttributeLSquare);
} else if (CurrentToken->is(tok::r_square) && Parent &&
Parent->is(TT_TemplateCloser)) {
Left->setType(TT_ArraySubscriptLSquare);
@@ -797,13 +795,12 @@ private:
while (CurrentToken) {
if (CurrentToken->is(tok::r_square)) {
- if (IsCpp11AttributeSpecifier) {
- CurrentToken->setType(TT_AttributeSquare);
- if (!IsInnerSquare)
- CurrentToken->EndsCppAttributeGroup = true;
+ if (IsCpp11AttributeSpecifier && !IsInnerSquare) {
+ CurrentToken->setType(TT_AttributeRSquare);
+ CurrentToken->EndsCppAttributeGroup = true;
}
if (IsCSharpAttributeSpecifier) {
- CurrentToken->setType(TT_AttributeSquare);
+ CurrentToken->setType(TT_AttributeRSquare);
} else if (((CurrentToken->Next &&
CurrentToken->Next->is(tok::l_paren)) ||
(CurrentToken->Previous &&
@@ -1297,7 +1294,7 @@ private:
bool consumeToken() {
if (IsCpp) {
const auto *Prev = CurrentToken->getPreviousNonComment();
- if (Prev && Prev->is(tok::r_square) && Prev->is(TT_AttributeSquare) &&
+ if (Prev && Prev->is(TT_AttributeRSquare) &&
CurrentToken->isOneOf(tok::kw_if, tok::kw_switch, tok::kw_case,
tok::kw_default, tok::kw_for, tok::kw_while) &&
mustBreakAfterAttributes(*CurrentToken, Style)) {
@@ -2850,7 +2847,7 @@ private:
T = Tok->Previous;
continue;
}
- } else if (T->is(TT_AttributeSquare)) {
+ } else if (T->is(TT_AttributeRSquare)) {
// Handle `x = (foo *[[clang::foo]])&v;`:
if (T->MatchingParen && T->MatchingParen->Previous) {
T = T->MatchingParen->Previous;
@@ -3656,7 +3653,7 @@ static FormatToken *getFunctionName(const AnnotatedLine &Line,
for (FormatToken *Tok = Line.getFirstNonComment(), *Name = nullptr; Tok;
Tok = Tok->getNextNonComment()) {
// Skip C++11 attributes both before and after the function name.
- if (Tok->is(tok::l_square) && Tok->is(TT_AttributeSquare)) {
+ if (Tok->is(TT_AttributeLSquare)) {
Tok = Tok->MatchingParen;
if (!Tok)
break;
@@ -3794,18 +3791,12 @@ static bool isFunctionDeclarationName(const LangOptions &LangOpts,
if (Current.is(TT_FunctionDeclarationName))
return true;
- if (!Current.Tok.getIdentifierInfo())
+ if (Current.isNoneOf(tok::identifier, tok::kw_operator))
return false;
const auto *Prev = Current.getPreviousNonComment();
assert(Prev);
- if (Prev->is(tok::coloncolon))
- Prev = Prev->Previous;
-
- if (!Prev)
- return false;
-
const auto &Previous = *Prev;
if (const auto *PrevPrev = Previous.getPreviousNonComment();
@@ -3854,6 +3845,8 @@ static bool isFunctionDeclarationName(const LangOptions &LangOpts,
// Find parentheses of parameter list.
if (Current.is(tok::kw_operator)) {
+ if (Line.startsWith(tok::kw_friend))
+ return true;
if (Previous.Tok.getIdentifierInfo() &&
Previous.isNoneOf(tok::kw_return, tok::kw_co_return)) {
return true;
@@ -4098,7 +4091,8 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) const {
if (Current->is(TT_LineComment)) {
if (Prev->is(BK_BracedInit) && Prev->opensScope()) {
Current->SpacesRequiredBefore =
- (Style.Cpp11BracedListStyle && !Style.SpacesInParensOptions.Other)
+ (Style.Cpp11BracedListStyle == FormatStyle::BLS_AlignFirstComment &&
+ !Style.SpacesInParensOptions.Other)
? 0
: 1;
} else if (Prev->is(TT_VerilogMultiLineListLParen)) {
@@ -4328,7 +4322,7 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
return 35;
if (Right.isNoneOf(TT_ObjCMethodExpr, TT_LambdaLSquare,
TT_ArrayInitializerLSquare,
- TT_DesignatedInitializerLSquare, TT_AttributeSquare)) {
+ TT_DesignatedInitializerLSquare, TT_AttributeLSquare)) {
return 500;
}
}
@@ -4449,8 +4443,10 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
(Left.ParameterCount <= 1 || Style.AllowAllArgumentsOnNextLine)) {
return 0;
}
- if (Left.is(tok::l_brace) && !Style.Cpp11BracedListStyle)
+ if (Left.is(tok::l_brace) &&
+ Style.Cpp11BracedListStyle == FormatStyle::BLS_Block) {
return 19;
+ }
return Left.ParameterCount > 1 ? Style.PenaltyBreakBeforeFirstCallParameter
: 19;
}
@@ -4616,7 +4612,7 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
// Format empty list as `<>`.
if (Left.is(tok::less) && Right.is(tok::greater))
return false;
- return !Style.Cpp11BracedListStyle;
+ return Style.Cpp11BracedListStyle == FormatStyle::BLS_Block;
}
// Don't attempt to format operator<(), as it is handled later.
if (Right.isNot(TT_OverloadedOperatorLParen))
@@ -4784,7 +4780,8 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
const auto SpaceRequiredForArrayInitializerLSquare =
[](const FormatToken &LSquareTok, const FormatStyle &Style) {
return Style.SpacesInContainerLiterals ||
- (Style.isProto() && !Style.Cpp11BracedListStyle &&
+ (Style.isProto() &&
+ Style.Cpp11BracedListStyle == FormatStyle::BLS_Block &&
LSquareTok.endsSequence(tok::l_square, tok::colon,
TT_SelectorName));
};
@@ -4808,7 +4805,7 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
if (Right.is(tok::l_square) &&
Right.isNoneOf(TT_ObjCMethodExpr, TT_LambdaLSquare,
TT_DesignatedInitializerLSquare,
- TT_StructuredBindingLSquare, TT_AttributeSquare) &&
+ TT_StructuredBindingLSquare, TT_AttributeLSquare) &&
Left.isNoneOf(tok::numeric_constant, TT_DictLiteral) &&
!(Left.isNot(tok::r_square) && Style.SpaceBeforeSquareBrackets &&
Right.is(TT_ArraySubscriptLSquare))) {
@@ -4817,7 +4814,8 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
if ((Left.is(tok::l_brace) && Left.isNot(BK_Block)) ||
(Right.is(tok::r_brace) && Right.MatchingParen &&
Right.MatchingParen->isNot(BK_Block))) {
- return !Style.Cpp11BracedListStyle || Style.SpacesInParensOptions.Other;
+ return Style.Cpp11BracedListStyle == FormatStyle::BLS_Block ||
+ Style.SpacesInParensOptions.Other;
}
if (Left.is(TT_BlockComment)) {
// No whitespace in x(/*foo=*/1), except for JavaScript.
@@ -4826,7 +4824,7 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
// Space between template and attribute.
// e.g. template <typename T> [[nodiscard]] ...
- if (Left.is(TT_TemplateCloser) && Right.is(TT_AttributeSquare))
+ if (Left.is(TT_TemplateCloser) && Right.is(TT_AttributeLSquare))
return true;
// Space before parentheses common for all languages
if (Right.is(tok::l_paren)) {
@@ -4841,10 +4839,8 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
return Style.SpaceBeforeParensOptions.AfterRequiresInExpression ||
spaceRequiredBeforeParens(Right);
}
- if (Left.is(TT_AttributeRParen) ||
- (Left.is(tok::r_square) && Left.is(TT_AttributeSquare))) {
+ if (Left.isOneOf(TT_AttributeRParen, TT_AttributeRSquare))
return true;
- }
if (Left.is(TT_ForEachMacro)) {
return Style.SpaceBeforeParensOptions.AfterForeachMacros ||
spaceRequiredBeforeParens(Right);
@@ -4999,7 +4995,7 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
Left.Children.empty()) {
if (Left.is(BK_Block))
return Style.SpaceInEmptyBraces != FormatStyle::SIEB_Never;
- if (Style.Cpp11BracedListStyle) {
+ if (Style.Cpp11BracedListStyle != FormatStyle::BLS_Block) {
return Style.SpacesInParens == FormatStyle::SIPO_Custom &&
Style.SpacesInParensOptions.InEmptyParentheses;
}
@@ -5081,7 +5077,7 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
if (Left.MatchingParen &&
Left.MatchingParen->is(TT_ProtoExtensionLSquare) &&
Right.isOneOf(tok::l_brace, tok::less)) {
- return !Style.Cpp11BracedListStyle;
+ return Style.Cpp11BracedListStyle == FormatStyle::BLS_Block;
}
// A percent is probably part of a formatting specification, such as %lld.
if (Left.is(tok::percent))
@@ -5521,7 +5517,7 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
if (Left.is(tok::greater) && Right.is(tok::greater)) {
if (Style.isTextProto() ||
(Style.Language == FormatStyle::LK_Proto && Left.is(TT_DictLiteral))) {
- return !Style.Cpp11BracedListStyle;
+ return Style.Cpp11BracedListStyle == FormatStyle::BLS_Block;
}
return Right.is(TT_TemplateCloser) && Left.is(TT_TemplateCloser) &&
((Style.Standard < FormatStyle::LS_Cpp11) ||
@@ -5662,16 +5658,14 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
}
// Break after C# [...] and before public/protected/private/internal.
- if (Left.is(TT_AttributeSquare) && Left.is(tok::r_square) &&
+ if (Left.is(TT_AttributeRSquare) &&
(Right.isAccessSpecifier(/*ColonRequired=*/false) ||
Right.is(Keywords.kw_internal))) {
return true;
}
// Break between ] and [ but only when there are really 2 attributes.
- if (Left.is(TT_AttributeSquare) && Right.is(TT_AttributeSquare) &&
- Left.is(tok::r_square) && Right.is(tok::l_square)) {
+ if (Left.is(TT_AttributeRSquare) && Right.is(TT_AttributeLSquare))
return true;
- }
} else if (Style.isJavaScript()) {
// FIXME: This might apply to other languages and token kinds.
if (Right.is(tok::string_literal) && Left.is(tok::plus) && BeforeLeft &&
@@ -6382,7 +6376,7 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
return false;
}
if (Left.is(tok::equal) && Right.is(tok::l_brace) &&
- !Style.Cpp11BracedListStyle) {
+ Style.Cpp11BracedListStyle == FormatStyle::BLS_Block) {
return false;
}
if (Left.is(TT_AttributeLParen) ||
@@ -6411,8 +6405,10 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
if (Right.isAttribute())
return true;
- if (Right.is(tok::l_square) && Right.is(TT_AttributeSquare))
- return Left.isNot(TT_AttributeSquare);
+ if (Right.is(TT_AttributeLSquare)) {
+ assert(Left.isNot(tok::l_square));
+ return true;
+ }
if (Left.is(tok::identifier) && Right.is(tok::string_literal))
return true;
@@ -6453,8 +6449,12 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
Left.getPrecedence() == prec::Assignment)) {
return true;
}
- if ((Left.is(TT_AttributeSquare) && Right.is(tok::l_square)) ||
- (Left.is(tok::r_square) && Right.is(TT_AttributeSquare))) {
+ if (Left.is(TT_AttributeLSquare) && Right.is(tok::l_square)) {
+ assert(Right.isNot(TT_AttributeLSquare));
+ return false;
+ }
+ if (Left.is(tok::r_square) && Right.is(TT_AttributeRSquare)) {
+ assert(Left.isNot(TT_AttributeRSquare));
return false;
}
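Note: a sketch of where the split token types land on a C++11 attribute under the `IsInnerSquare` logic above; only the outer brackets are annotated, and the inner pair no longer receives an attribute token type:

    [[nodiscard]] int f();
    // outermost '[' -> TT_AttributeLSquare
    // outermost ']' -> TT_AttributeRSquare, with EndsCppAttributeGroup = true
    // the inner '[' and ']' are not typed by the attribute-specifier path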
diff --git a/clang/lib/Format/WhitespaceManager.cpp b/clang/lib/Format/WhitespaceManager.cpp
index 7348a3a..b004d73 100644
--- a/clang/lib/Format/WhitespaceManager.cpp
+++ b/clang/lib/Format/WhitespaceManager.cpp
@@ -432,7 +432,11 @@ AlignTokenSequence(const FormatStyle &Style, unsigned Start, unsigned End,
// right-justified. It is used to align compound assignments like `+=` and `=`.
// When RightJustify and ACS.PadOperators are true, operators in each block to
// be aligned will be padded on the left to the same length before aligning.
-template <typename F>
+//
+// The simple check does not look at the indentation and nesting level to
+// recurse into the line for alignment, nor does it count commas. This is
+// used, e.g., for aligning macro definitions.
+template <typename F, bool SimpleCheck = false>
static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
SmallVector<WhitespaceManager::Change, 16> &Changes,
unsigned StartAt,
@@ -465,9 +469,9 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
// Measure the scope level (i.e. depth of (), [], {}) of the first token, and
// abort when we hit any token in a higher scope than the starting one.
- auto IndentAndNestingLevel = StartAt < Changes.size()
- ? Changes[StartAt].indentAndNestingLevel()
- : std::tuple<unsigned, unsigned, unsigned>();
+ const auto IndentAndNestingLevel =
+ StartAt < Changes.size() ? Changes[StartAt].indentAndNestingLevel()
+ : std::tuple<unsigned, unsigned, unsigned>();
// Keep track of the number of commas before the matching tokens, we will only
// align a sequence of matching tokens if they are preceded by the same number
@@ -536,14 +540,17 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
if (CurrentChange.Tok->isNot(tok::comment))
LineIsComment = false;
- if (CurrentChange.Tok->is(tok::comma)) {
- ++CommasBeforeMatch;
- } else if (CurrentChange.indentAndNestingLevel() > IndentAndNestingLevel) {
- // Call AlignTokens recursively, skipping over this scope block.
- unsigned StoppedAt =
- AlignTokens(Style, Matches, Changes, i, ACS, RightJustify);
- i = StoppedAt - 1;
- continue;
+ if (!SimpleCheck) {
+ if (CurrentChange.Tok->is(tok::comma)) {
+ ++CommasBeforeMatch;
+ } else if (CurrentChange.indentAndNestingLevel() >
+ IndentAndNestingLevel) {
+ // Call AlignTokens recursively, skipping over this scope block.
+ const auto StoppedAt =
+ AlignTokens(Style, Matches, Changes, i, ACS, RightJustify);
+ i = StoppedAt - 1;
+ continue;
+ }
}
if (!Matches(CurrentChange))
@@ -656,7 +663,7 @@ void WhitespaceManager::alignConsecutiveMacros() {
auto AlignMacrosMatches = [](const Change &C) {
const FormatToken *Current = C.Tok;
- unsigned SpacesRequiredBefore = 1;
+ assert(Current);
if (Current->SpacesRequiredBefore == 0 || !Current->Previous)
return false;
@@ -665,79 +672,26 @@ void WhitespaceManager::alignConsecutiveMacros() {
// If token is a ")", skip over the parameter list, to the
// token that precedes the "("
- if (Current->is(tok::r_paren) && Current->MatchingParen) {
- Current = Current->MatchingParen->Previous;
- SpacesRequiredBefore = 0;
- }
-
- if (!Current || Current->isNot(tok::identifier))
- return false;
-
- if (!Current->Previous || Current->Previous->isNot(tok::pp_define))
- return false;
-
- // For a macro function, 0 spaces are required between the
- // identifier and the lparen that opens the parameter list.
- // For a simple macro, 1 space is required between the
- // identifier and the first token of the defined value.
- return Current->Next->SpacesRequiredBefore == SpacesRequiredBefore;
- };
-
- unsigned MinColumn = 0;
-
- // Start and end of the token sequence we're processing.
- unsigned StartOfSequence = 0;
- unsigned EndOfSequence = 0;
-
- // Whether a matching token has been found on the current line.
- bool FoundMatchOnLine = false;
-
- // Whether the current line consists only of comments
- bool LineIsComment = true;
-
- unsigned I = 0;
- for (unsigned E = Changes.size(); I != E; ++I) {
- if (Changes[I].NewlinesBefore != 0) {
- EndOfSequence = I;
-
- // Whether to break the alignment sequence because of an empty line.
- bool EmptyLineBreak = (Changes[I].NewlinesBefore > 1) &&
- !Style.AlignConsecutiveMacros.AcrossEmptyLines;
-
- // Whether to break the alignment sequence because of a line without a
- // match.
- bool NoMatchBreak =
- !FoundMatchOnLine &&
- !(LineIsComment && Style.AlignConsecutiveMacros.AcrossComments);
-
- if (EmptyLineBreak || NoMatchBreak) {
- AlignMatchingTokenSequence(StartOfSequence, EndOfSequence, MinColumn,
- AlignMacrosMatches, Changes);
+ if (Current->is(tok::r_paren)) {
+ const auto *MatchingParen = Current->MatchingParen;
+ // For a macro function, 0 spaces are required between the
+ // identifier and the lparen that opens the parameter list.
+ if (!MatchingParen || MatchingParen->SpacesRequiredBefore > 0 ||
+ !MatchingParen->Previous) {
+ return false;
}
-
- // A new line starts, re-initialize line status tracking bools.
- FoundMatchOnLine = false;
- LineIsComment = true;
+ Current = MatchingParen->Previous;
+ } else if (Current->Next->SpacesRequiredBefore != 1) {
+ // For a simple macro, 1 space is required between the
+ // identifier and the first token of the defined value.
+ return false;
}
- if (Changes[I].Tok->isNot(tok::comment))
- LineIsComment = false;
-
- if (!AlignMacrosMatches(Changes[I]))
- continue;
-
- FoundMatchOnLine = true;
-
- if (StartOfSequence == 0)
- StartOfSequence = I;
-
- unsigned ChangeMinColumn = Changes[I].StartOfTokenColumn;
- MinColumn = std::max(MinColumn, ChangeMinColumn);
- }
+ return Current->endsSequence(tok::identifier, tok::pp_define);
+ };
- EndOfSequence = I;
- AlignMatchingTokenSequence(StartOfSequence, EndOfSequence, MinColumn,
- AlignMacrosMatches, Changes);
+ AlignTokens<decltype(AlignMacrosMatches) &, /*SimpleCheck=*/true>(
+ Style, AlignMacrosMatches, Changes, 0, Style.AlignConsecutiveMacros);
}
void WhitespaceManager::alignConsecutiveAssignments() {
@@ -1238,7 +1192,8 @@ void WhitespaceManager::alignArrayInitializersRightJustified(
if (!CellDescs.isRectangular())
return;
- const int BracePadding = Style.Cpp11BracedListStyle ? 0 : 1;
+ const int BracePadding =
+ Style.Cpp11BracedListStyle != FormatStyle::BLS_Block ? 0 : 1;
auto &Cells = CellDescs.Cells;
// Now go through and fixup the spaces.
auto *CellIter = Cells.begin();
@@ -1314,7 +1269,8 @@ void WhitespaceManager::alignArrayInitializersLeftJustified(
if (!CellDescs.isRectangular())
return;
- const int BracePadding = Style.Cpp11BracedListStyle ? 0 : 1;
+ const int BracePadding =
+ Style.Cpp11BracedListStyle != FormatStyle::BLS_Block ? 0 : 1;
auto &Cells = CellDescs.Cells;
// Now go through and fixup the spaces.
auto *CellIter = Cells.begin();
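Note: the rewritten `AlignMacrosMatches`, fed through `AlignTokens<..., /*SimpleCheck=*/true>`, accepts the same two `#define` shapes as the old hand-rolled loop; a sketch of input it aligns, assuming `AlignConsecutiveMacros` is enabled:

    #define SHORT        1    // simple macro: exactly one space after the name
    #define LONGER(x)    (x)  // function-like: '(' immediately after the name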
diff --git a/clang/lib/Headers/avx2intrin.h b/clang/lib/Headers/avx2intrin.h
index fa7f4c2..d35bc0e 100644
--- a/clang/lib/Headers/avx2intrin.h
+++ b/clang/lib/Headers/avx2intrin.h
@@ -1650,9 +1650,8 @@ _mm256_mul_epi32(__m256i __a, __m256i __b) {
/// \param __b
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \returns A 256-bit vector of [16 x i16] containing the rounded products.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mulhrs_epi16(__m256i __a, __m256i __b)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_mulhrs_epi16(__m256i __a, __m256i __b) {
return (__m256i)__builtin_ia32_pmulhrsw256((__v16hi)__a, (__v16hi)__b);
}
@@ -1670,8 +1669,7 @@ _mm256_mulhrs_epi16(__m256i __a, __m256i __b)
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \returns A 256-bit vector of [16 x i16] containing the products.
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_mulhi_epu16(__m256i __a, __m256i __b)
-{
+_mm256_mulhi_epu16(__m256i __a, __m256i __b) {
return (__m256i)__builtin_ia32_pmulhuw256((__v16hu)__a, (__v16hu)__b);
}
diff --git a/clang/lib/Headers/avx512bwintrin.h b/clang/lib/Headers/avx512bwintrin.h
index 23b2d29..ac75b6c 100644
--- a/clang/lib/Headers/avx512bwintrin.h
+++ b/clang/lib/Headers/avx512bwintrin.h
@@ -1003,23 +1003,20 @@ _mm512_maskz_permutex2var_epi16(__mmask32 __U, __m512i __A, __m512i __I,
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mulhrs_epi16(__m512i __A, __m512i __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mulhrs_epi16(__m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_pmulhrsw512((__v32hi)__A, (__v32hi)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mulhrs_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mask_mulhrs_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_mulhrs_epi16(__A, __B),
(__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mulhrs_epi16(__mmask32 __U, __m512i __A, __m512i __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_maskz_mulhrs_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_mulhrs_epi16(__A, __B),
(__v32hi)_mm512_setzero_si512());
diff --git a/clang/lib/Headers/avx512vlbwintrin.h b/clang/lib/Headers/avx512vlbwintrin.h
index 639fb60..0fcfe37 100644
--- a/clang/lib/Headers/avx512vlbwintrin.h
+++ b/clang/lib/Headers/avx512vlbwintrin.h
@@ -1510,28 +1510,28 @@ _mm256_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask16 __M, __m256i __A)
__builtin_ia32_pmovuswb256mem_mask ((__v16qi*) __P, (__v16hi) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_mulhrs_epi16(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_mulhrs_epi16(__X, __Y),
(__v8hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_maskz_mulhrs_epi16(__mmask8 __U, __m128i __X, __m128i __Y) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_mulhrs_epi16(__X, __Y),
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_mulhrs_epi16(__m256i __W, __mmask16 __U, __m256i __X, __m256i __Y) {
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_mulhrs_epi16(__X, __Y),
(__v16hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_mulhrs_epi16(__mmask16 __U, __m256i __X, __m256i __Y) {
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_mulhrs_epi16(__X, __Y),
diff --git a/clang/lib/Headers/tmmintrin.h b/clang/lib/Headers/tmmintrin.h
index ee96caa..5d0f20f 100644
--- a/clang/lib/Headers/tmmintrin.h
+++ b/clang/lib/Headers/tmmintrin.h
@@ -544,8 +544,8 @@ _mm_maddubs_pi16(__m64 __a, __m64 __b) {
/// A 128-bit vector of [8 x i16] containing one of the source operands.
/// \returns A 128-bit vector of [8 x i16] containing the rounded and scaled
/// products of both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhrs_epi16(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_mulhrs_epi16(__m128i __a, __m128i __b) {
return (__m128i)__builtin_ia32_pmulhrsw128((__v8hi)__a, (__v8hi)__b);
}
@@ -563,11 +563,10 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhrs_epi16(__m128i __a,
/// A 64-bit vector of [4 x i16] containing one of the source operands.
/// \returns A 64-bit vector of [4 x i16] containing the rounded and scaled
/// products of both operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_mm_mulhrs_pi16(__m64 __a, __m64 __b)
-{
- return __trunc64(__builtin_ia32_pmulhrsw128((__v8hi)__anyext128(__a),
- (__v8hi)__anyext128(__b)));
+static __inline__ __m64 __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_mulhrs_pi16(__m64 __a, __m64 __b) {
+ return __trunc64(__builtin_ia32_pmulhrsw128((__v8hi)__zext128(__a),
+ (__v8hi)__zext128(__b)));
}
/// Copies the 8-bit integers from a 128-bit integer vector to the
diff --git a/clang/lib/Parse/ParseTemplate.cpp b/clang/lib/Parse/ParseTemplate.cpp
index dbc7cbc..330a9c6 100644
--- a/clang/lib/Parse/ParseTemplate.cpp
+++ b/clang/lib/Parse/ParseTemplate.cpp
@@ -533,6 +533,12 @@ bool Parser::isTypeConstraintAnnotation() {
bool Parser::TryAnnotateTypeConstraint() {
if (!getLangOpts().CPlusPlus20)
return false;
+ // The type constraint may declare template parameters, notably
+ // if it contains a generic lambda, so we need to increment
+ // the template depth as these parameters would not be instantiated
+ // at the current depth.
+ TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
+ ++CurTemplateDepthTracker;
CXXScopeSpec SS;
bool WasScopeAnnotation = Tok.is(tok::annot_cxxscope);
if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
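Note: a hedged illustration of the case the depth bump guards against; a generic lambda inside the constraint's argument list declares its own template parameter, which lives at a deeper depth than the parameter being annotated:

    template <class, class> concept C = true;

    // The lambda's implicit <class U> must not be instantiated at T's
    // depth, hence the TemplateParameterDepthRAII increment above.
    template <C<decltype([]<class U>(U) {})> T> void f(T);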
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index 652527a..ef1be23 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -12309,13 +12309,20 @@ static void DiagnoseMixedUnicodeImplicitConversion(Sema &S, const Type *Source,
SourceLocation CC) {
assert(Source->isUnicodeCharacterType() && Target->isUnicodeCharacterType() &&
Source != Target);
+
+ // Lone surrogates have a distinct representation in UTF-32.
+ // Converting between UTF-16 and UTF-32 codepoints seems very widespread,
+ // so don't warn on such conversion.
+ if (Source->isChar16Type() && Target->isChar32Type())
+ return;
+
Expr::EvalResult Result;
if (E->EvaluateAsInt(Result, S.getASTContext(), Expr::SE_AllowSideEffects,
S.isConstantEvaluatedContext())) {
llvm::APSInt Value(32);
Value = Result.Val.getInt();
bool IsASCII = Value <= 0x7F;
- bool IsBMP = Value <= 0xD7FF || (Value >= 0xE000 && Value <= 0xFFFF);
+ bool IsBMP = Value <= 0xDFFF || (Value >= 0xE000 && Value <= 0xFFFF);
bool ConversionPreservesSemantics =
IsASCII || (!Source->isChar8Type() && !Target->isChar8Type() && IsBMP);
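Note: a sketch of how the two changes interact, using the values this function evaluates (not an exact regression test):

    constexpr char32_t S = 0xD800;   // lone surrogate code point
    char16_t Narrowed = S;           // char32_t -> char16_t: still evaluated, but
                                     // the BMP bound is now 0xDFFF, not 0xD7FF
    char32_t Widened = char16_t(S);  // char16_t -> char32_t: returns early, no warning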
diff --git a/clang/lib/Sema/SemaConcept.cpp b/clang/lib/Sema/SemaConcept.cpp
index 04a73181..54cbfe4 100644
--- a/clang/lib/Sema/SemaConcept.cpp
+++ b/clang/lib/Sema/SemaConcept.cpp
@@ -432,7 +432,7 @@ private:
// XXX: It is SLOW! Use it very carefully.
std::optional<MultiLevelTemplateArgumentList> SubstitutionInTemplateArguments(
const NormalizedConstraintWithParamMapping &Constraint,
- MultiLevelTemplateArgumentList MLTAL,
+ const MultiLevelTemplateArgumentList &MLTAL,
llvm::SmallVector<TemplateArgument> &SubstitutedOuterMost);
ExprResult EvaluateSlow(const AtomicConstraint &Constraint,
@@ -564,12 +564,17 @@ ExprResult ConstraintSatisfactionChecker::EvaluateAtomicConstraint(
std::optional<MultiLevelTemplateArgumentList>
ConstraintSatisfactionChecker::SubstitutionInTemplateArguments(
const NormalizedConstraintWithParamMapping &Constraint,
- MultiLevelTemplateArgumentList MLTAL,
- llvm::SmallVector<TemplateArgument> &SubstitutedOuterMost) {
+ const MultiLevelTemplateArgumentList &MLTAL,
+ llvm::SmallVector<TemplateArgument> &SubstitutedOutermost) {
if (!Constraint.hasParameterMapping())
return std::move(MLTAL);
+ // The mapping is empty, meaning no template arguments are needed for
+ // evaluation.
+ if (Constraint.getParameterMapping().empty())
+ return MultiLevelTemplateArgumentList();
+
TemplateDeductionInfo Info(Constraint.getBeginLoc());
Sema::InstantiatingTemplate Inst(
S, Constraint.getBeginLoc(),
@@ -607,7 +612,7 @@ ConstraintSatisfactionChecker::SubstitutionInTemplateArguments(
// The empty MLTAL situation should only occur when evaluating non-dependent
// constraints.
if (MLTAL.getNumSubstitutedLevels())
- SubstitutedOuterMost =
+ SubstitutedOutermost =
llvm::to_vector_of<TemplateArgument>(MLTAL.getOutermost());
unsigned Offset = 0;
for (unsigned I = 0, MappedIndex = 0; I < Used.size(); I++) {
@@ -615,19 +620,19 @@ ConstraintSatisfactionChecker::SubstitutionInTemplateArguments(
if (Used[I])
Arg = S.Context.getCanonicalTemplateArgument(
CTAI.SugaredConverted[MappedIndex++]);
- if (I < SubstitutedOuterMost.size()) {
- SubstitutedOuterMost[I] = Arg;
+ if (I < SubstitutedOutermost.size()) {
+ SubstitutedOutermost[I] = Arg;
Offset = I + 1;
} else {
- SubstitutedOuterMost.push_back(Arg);
- Offset = SubstitutedOuterMost.size();
+ SubstitutedOutermost.push_back(Arg);
+ Offset = SubstitutedOutermost.size();
}
}
- if (Offset < SubstitutedOuterMost.size())
- SubstitutedOuterMost.erase(SubstitutedOuterMost.begin() + Offset);
+ if (Offset < SubstitutedOutermost.size())
+ SubstitutedOutermost.erase(SubstitutedOutermost.begin() + Offset);
MultiLevelTemplateArgumentList SubstitutedTemplateArgs;
- SubstitutedTemplateArgs.addOuterTemplateArguments(TD, SubstitutedOuterMost,
+ SubstitutedTemplateArgs.addOuterTemplateArguments(TD, SubstitutedOutermost,
/*Final=*/false);
return std::move(SubstitutedTemplateArgs);
}
@@ -636,9 +641,9 @@ ExprResult ConstraintSatisfactionChecker::EvaluateSlow(
const AtomicConstraint &Constraint,
const MultiLevelTemplateArgumentList &MLTAL) {
- llvm::SmallVector<TemplateArgument> SubstitutedOuterMost;
+ llvm::SmallVector<TemplateArgument> SubstitutedOutermost;
std::optional<MultiLevelTemplateArgumentList> SubstitutedArgs =
- SubstitutionInTemplateArguments(Constraint, MLTAL, SubstitutedOuterMost);
+ SubstitutionInTemplateArguments(Constraint, MLTAL, SubstitutedOutermost);
if (!SubstitutedArgs) {
Satisfaction.IsSatisfied = false;
return ExprEmpty();
@@ -736,8 +741,9 @@ ExprResult ConstraintSatisfactionChecker::Evaluate(
UnsubstitutedConstraintSatisfactionCacheResult Cache;
Cache.Satisfaction.ContainsErrors = Satisfaction.ContainsErrors;
Cache.Satisfaction.IsSatisfied = Satisfaction.IsSatisfied;
- std::copy(Satisfaction.Details.begin() + Size, Satisfaction.Details.end(),
- std::back_inserter(Cache.Satisfaction.Details));
+ Cache.Satisfaction.Details.insert(Cache.Satisfaction.Details.end(),
+ Satisfaction.Details.begin() + Size,
+ Satisfaction.Details.end());
Cache.SubstExpr = E;
S.UnsubstitutedConstraintSatisfactionCache.insert({ID, std::move(Cache)});
@@ -786,13 +792,13 @@ ExprResult ConstraintSatisfactionChecker::EvaluateSlow(
FoldExpandedConstraint::FoldOperatorKind::And;
unsigned EffectiveDetailEndIndex = Satisfaction.Details.size();
- llvm::SmallVector<TemplateArgument> SubstitutedOuterMost;
+ llvm::SmallVector<TemplateArgument> SubstitutedOutermost;
// FIXME: Is PackSubstitutionIndex correct?
llvm::SaveAndRestore _(PackSubstitutionIndex, S.ArgPackSubstIndex);
std::optional<MultiLevelTemplateArgumentList> SubstitutedArgs =
SubstitutionInTemplateArguments(
static_cast<const NormalizedConstraintWithParamMapping &>(Constraint),
- MLTAL, SubstitutedOuterMost);
+ MLTAL, SubstitutedOutermost);
if (!SubstitutedArgs) {
Satisfaction.IsSatisfied = false;
return ExprError();
@@ -868,8 +874,9 @@ ExprResult ConstraintSatisfactionChecker::Evaluate(
UnsubstitutedConstraintSatisfactionCacheResult Cache;
Cache.Satisfaction.ContainsErrors = Satisfaction.ContainsErrors;
Cache.Satisfaction.IsSatisfied = Satisfaction.IsSatisfied;
- std::copy(Satisfaction.Details.begin() + Size, Satisfaction.Details.end(),
- std::back_inserter(Cache.Satisfaction.Details));
+ Cache.Satisfaction.Details.insert(Cache.Satisfaction.Details.end(),
+ Satisfaction.Details.begin() + Size,
+ Satisfaction.Details.end());
Cache.SubstExpr = E;
S.UnsubstitutedConstraintSatisfactionCache.insert({ID, std::move(Cache)});
return E;
@@ -880,9 +887,9 @@ ExprResult ConstraintSatisfactionChecker::EvaluateSlow(
const MultiLevelTemplateArgumentList &MLTAL, unsigned Size) {
const ConceptReference *ConceptId = Constraint.getConceptId();
- llvm::SmallVector<TemplateArgument> SubstitutedOuterMost;
+ llvm::SmallVector<TemplateArgument> SubstitutedOutermost;
std::optional<MultiLevelTemplateArgumentList> SubstitutedArgs =
- SubstitutionInTemplateArguments(Constraint, MLTAL, SubstitutedOuterMost);
+ SubstitutionInTemplateArguments(Constraint, MLTAL, SubstitutedOutermost);
if (!SubstitutedArgs) {
Satisfaction.IsSatisfied = false;
@@ -1012,8 +1019,9 @@ ExprResult ConstraintSatisfactionChecker::Evaluate(
UnsubstitutedConstraintSatisfactionCacheResult Cache;
Cache.Satisfaction.ContainsErrors = Satisfaction.ContainsErrors;
Cache.Satisfaction.IsSatisfied = Satisfaction.IsSatisfied;
- std::copy(Satisfaction.Details.begin() + Size, Satisfaction.Details.end(),
- std::back_inserter(Cache.Satisfaction.Details));
+ Cache.Satisfaction.Details.insert(Cache.Satisfaction.Details.end(),
+ Satisfaction.Details.begin() + Size,
+ Satisfaction.Details.end());
Cache.SubstExpr = CE;
S.UnsubstitutedConstraintSatisfactionCache.insert({ID, std::move(Cache)});
return CE;
@@ -1217,10 +1225,10 @@ bool Sema::CheckConstraintSatisfaction(
return false;
}
-static const ExprResult
-SubstituteConceptsInConstrainExpression(Sema &S, const NamedDecl *D,
- const ConceptSpecializationExpr *CSE,
- UnsignedOrNone SubstIndex) {
+static ExprResult
+SubstituteConceptsInConstraintExpression(Sema &S, const NamedDecl *D,
+ const ConceptSpecializationExpr *CSE,
+ UnsignedOrNone SubstIndex) {
// [C++2c] [temp.constr.normal]
// Otherwise, to form CE, any non-dependent concept template argument Ai
@@ -1255,7 +1263,7 @@ bool Sema::CheckConstraintSatisfaction(
const ConceptSpecializationExpr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction) {
- ExprResult Res = SubstituteConceptsInConstrainExpression(
+ ExprResult Res = SubstituteConceptsInConstraintExpression(
*this, nullptr, ConstraintExpr, ArgPackSubstIndex);
if (!Res.isUsable())
return true;
@@ -2017,8 +2025,13 @@ void SubstituteParameterMappings::buildParameterMapping(
SemaRef.MarkUsedTemplateParameters(Args->arguments(),
/*Depth=*/0, OccurringIndices);
}
+ unsigned Size = OccurringIndices.count();
+ // When the constraint is independent of any template parameters,
+ // we build an empty mapping so that we can distinguish these cases
+ // from cases where no mapping exists at all, e.g. when there are only atomic
+ // constraints.
TemplateArgumentLoc *TempArgs =
- new (SemaRef.Context) TemplateArgumentLoc[OccurringIndices.count()];
+ new (SemaRef.Context) TemplateArgumentLoc[Size];
llvm::SmallVector<NamedDecl *> UsedParams;
for (unsigned I = 0, J = 0, C = TemplateParams->size(); I != C; ++I) {
SourceLocation Loc = ArgsAsWritten->NumTemplateArgs > I
@@ -2039,7 +2052,6 @@ void SubstituteParameterMappings::buildParameterMapping(
TemplateParams->getLAngleLoc(), UsedParams,
/*RAngleLoc=*/SourceLocation(),
/*RequiresClause=*/nullptr);
- unsigned Size = OccurringIndices.count();
N.updateParameterMapping(
std::move(OccurringIndices), std::move(OccurringIndicesForSubsumption),
MutableArrayRef<TemplateArgumentLoc>{TempArgs, Size}, UsedList);
@@ -2050,6 +2062,10 @@ bool SubstituteParameterMappings::substitute(
if (!N.hasParameterMapping())
buildParameterMapping(N);
+ // If the parameter mapping is empty, there is nothing to substitute.
+ if (N.getParameterMapping().empty())
+ return false;
+
SourceLocation InstLocBegin, InstLocEnd;
llvm::ArrayRef Arguments = ArgsAsWritten->arguments();
if (Arguments.empty()) {
@@ -2289,7 +2305,7 @@ NormalizedConstraint *NormalizedConstraint::fromConstraintExpr(
ConceptDecl *CD = CSE->getNamedConcept()->getCanonicalDecl();
ExprResult Res =
- SubstituteConceptsInConstrainExpression(S, D, CSE, SubstIndex);
+ SubstituteConceptsInConstraintExpression(S, D, CSE, SubstIndex);
if (!Res.isUsable())
return nullptr;
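Note: a sketch of a constraint whose atoms mention none of the enclosing template parameters; with this change its parameter mapping is built empty rather than left absent, which is what the new `getParameterMapping().empty()` checks rely on:

    template <class> concept Trivial = true;

    template <class T>
      requires Trivial<int>   // uses none of {T}: empty mapping, not a missing one
    void g(T);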
diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp
index 04d46d6..fc3aabf 100644
--- a/clang/lib/Sema/SemaDecl.cpp
+++ b/clang/lib/Sema/SemaDecl.cpp
@@ -7640,6 +7640,58 @@ static bool isMainVar(DeclarationName Name, VarDecl *VD) {
VD->isExternC());
}
+void Sema::CheckAsmLabel(Scope *S, Expr *E, StorageClass SC,
+ TypeSourceInfo *TInfo, VarDecl *NewVD) {
+
+  // Return early if the declaration does not have an asm label.
+ if (E == nullptr)
+ return;
+
+ // The parser guarantees this is a string.
+ StringLiteral *SE = cast<StringLiteral>(E);
+ StringRef Label = SE->getString();
+ QualType R = TInfo->getType();
+ if (S->getFnParent() != nullptr) {
+ switch (SC) {
+ case SC_None:
+ case SC_Auto:
+ Diag(E->getExprLoc(), diag::warn_asm_label_on_auto_decl) << Label;
+ break;
+ case SC_Register:
+ // Local Named register
+ if (!Context.getTargetInfo().isValidGCCRegisterName(Label) &&
+ DeclAttrsMatchCUDAMode(getLangOpts(), getCurFunctionDecl()))
+ Diag(E->getExprLoc(), diag::err_asm_unknown_register_name) << Label;
+ break;
+ case SC_Static:
+ case SC_Extern:
+ case SC_PrivateExtern:
+ break;
+ }
+ } else if (SC == SC_Register) {
+ // Global Named register
+ if (DeclAttrsMatchCUDAMode(getLangOpts(), NewVD)) {
+ const auto &TI = Context.getTargetInfo();
+ bool HasSizeMismatch;
+
+ if (!TI.isValidGCCRegisterName(Label))
+ Diag(E->getExprLoc(), diag::err_asm_unknown_register_name) << Label;
+ else if (!TI.validateGlobalRegisterVariable(Label, Context.getTypeSize(R),
+ HasSizeMismatch))
+ Diag(E->getExprLoc(), diag::err_asm_invalid_global_var_reg) << Label;
+ else if (HasSizeMismatch)
+ Diag(E->getExprLoc(), diag::err_asm_register_size_mismatch) << Label;
+ }
+
+ if (!R->isIntegralType(Context) && !R->isPointerType()) {
+ Diag(TInfo->getTypeLoc().getBeginLoc(),
+ diag::err_asm_unsupported_register_type)
+ << TInfo->getTypeLoc().getSourceRange();
+ NewVD->setInvalidDecl(true);
+ }
+ }
+}
+
NamedDecl *Sema::ActOnVariableDeclarator(
Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo,
LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists,
@@ -8124,6 +8176,26 @@ NamedDecl *Sema::ActOnVariableDeclarator(
}
}
+ if (Expr *E = D.getAsmLabel()) {
+ // The parser guarantees this is a string.
+ StringLiteral *SE = cast<StringLiteral>(E);
+ StringRef Label = SE->getString();
+
+ // Insert the asm attribute.
+ NewVD->addAttr(AsmLabelAttr::Create(Context, Label, SE->getStrTokenLoc(0)));
+ } else if (!ExtnameUndeclaredIdentifiers.empty()) {
+ llvm::DenseMap<IdentifierInfo *, AsmLabelAttr *>::iterator I =
+ ExtnameUndeclaredIdentifiers.find(NewVD->getIdentifier());
+ if (I != ExtnameUndeclaredIdentifiers.end()) {
+ if (isDeclExternC(NewVD)) {
+ NewVD->addAttr(I->second);
+ ExtnameUndeclaredIdentifiers.erase(I);
+ } else
+ Diag(NewVD->getLocation(), diag::warn_redefine_extname_not_applied)
+ << /*Variable*/ 1 << NewVD;
+ }
+ }
+
// Handle attributes prior to checking for duplicates in MergeVarDecl
ProcessDeclAttributes(S, NewVD, D);
@@ -8174,65 +8246,11 @@ NamedDecl *Sema::ActOnVariableDeclarator(
if (getLangOpts().ObjCAutoRefCount && ObjC().inferObjCARCLifetime(NewVD))
NewVD->setInvalidDecl();
- // Handle GNU asm-label extension (encoded as an attribute).
- if (Expr *E = D.getAsmLabel()) {
- // The parser guarantees this is a string.
- StringLiteral *SE = cast<StringLiteral>(E);
- StringRef Label = SE->getString();
- if (S->getFnParent() != nullptr) {
- switch (SC) {
- case SC_None:
- case SC_Auto:
- Diag(E->getExprLoc(), diag::warn_asm_label_on_auto_decl) << Label;
- break;
- case SC_Register:
- // Local Named register
- if (!Context.getTargetInfo().isValidGCCRegisterName(Label) &&
- DeclAttrsMatchCUDAMode(getLangOpts(), getCurFunctionDecl()))
- Diag(E->getExprLoc(), diag::err_asm_unknown_register_name) << Label;
- break;
- case SC_Static:
- case SC_Extern:
- case SC_PrivateExtern:
- break;
- }
- } else if (SC == SC_Register) {
- // Global Named register
- if (DeclAttrsMatchCUDAMode(getLangOpts(), NewVD)) {
- const auto &TI = Context.getTargetInfo();
- bool HasSizeMismatch;
-
- if (!TI.isValidGCCRegisterName(Label))
- Diag(E->getExprLoc(), diag::err_asm_unknown_register_name) << Label;
- else if (!TI.validateGlobalRegisterVariable(Label,
- Context.getTypeSize(R),
- HasSizeMismatch))
- Diag(E->getExprLoc(), diag::err_asm_invalid_global_var_reg) << Label;
- else if (HasSizeMismatch)
- Diag(E->getExprLoc(), diag::err_asm_register_size_mismatch) << Label;
- }
-
- if (!R->isIntegralType(Context) && !R->isPointerType()) {
- Diag(TInfo->getTypeLoc().getBeginLoc(),
- diag::err_asm_unsupported_register_type)
- << TInfo->getTypeLoc().getSourceRange();
- NewVD->setInvalidDecl(true);
- }
- }
-
- NewVD->addAttr(AsmLabelAttr::Create(Context, Label, SE->getStrTokenLoc(0)));
- } else if (!ExtnameUndeclaredIdentifiers.empty()) {
- llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*>::iterator I =
- ExtnameUndeclaredIdentifiers.find(NewVD->getIdentifier());
- if (I != ExtnameUndeclaredIdentifiers.end()) {
- if (isDeclExternC(NewVD)) {
- NewVD->addAttr(I->second);
- ExtnameUndeclaredIdentifiers.erase(I);
- } else
- Diag(NewVD->getLocation(), diag::warn_redefine_extname_not_applied)
- << /*Variable*/1 << NewVD;
- }
- }
+  // Check the asm label here, as we need to know all other attributes of the
+  // Decl first. Otherwise, we can't know whether the asm label refers to the
+  // host or the device in a CUDA context; the device has different registers
+  // than the host, and we must know where the declaration will be placed.
+ CheckAsmLabel(S, D.getAsmLabel(), SC, TInfo, NewVD);
// Find the shadowed declaration before filtering for scope.
NamedDecl *ShadowedDecl = D.getCXXScopeSpec().isEmpty()
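Note: the relocated check still covers the same three paths; a sketch of declarations reaching each diagnostic, assuming a dialect that accepts `register` (e.g. C, or C++ before C++17) and a target where the named registers exist:

    void h() {
      int A asm("foo");              // SC_None/SC_Auto: warn_asm_label_on_auto_decl
      register int B asm("nonreg");  // local named register: err_asm_unknown_register_name
    }
    register int G asm("sp");        // global named register: name, size, and type checks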
diff --git a/clang/lib/Sema/SemaRISCV.cpp b/clang/lib/Sema/SemaRISCV.cpp
index 3ba93ff9..c5ef0d5 100644
--- a/clang/lib/Sema/SemaRISCV.cpp
+++ b/clang/lib/Sema/SemaRISCV.cpp
@@ -1464,7 +1464,8 @@ void SemaRISCV::checkRVVTypeSupport(QualType Ty, SourceLocation Loc, Decl *D,
}
else if (Info.ElementType->isBFloat16Type() &&
!FeatureMap.lookup("zvfbfmin") &&
- !FeatureMap.lookup("xandesvbfhcvt"))
+ !FeatureMap.lookup("xandesvbfhcvt") &&
+ !FeatureMap.lookup("experimental-zvfbfa"))
if (DeclareAndesVectorBuiltins) {
Diag(Loc, diag::err_riscv_type_requires_extension, D)
<< Ty << "zvfbfmin or xandesvbfhcvt";
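Note: a sketch of a declaration this gate affects; an RVV bfloat16 vector type now also passes the check when only the experimental zvfbfa extension provides bf16 support (previously zvfbfmin or xandesvbfhcvt was required):

    #include <riscv_vector.h>
    vbfloat16m1_t V;  // accepted with +experimental-zvfbfa as well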
diff --git a/clang/lib/Sema/SemaTemplateInstantiate.cpp b/clang/lib/Sema/SemaTemplateInstantiate.cpp
index ca7e3b2..7f85805 100644
--- a/clang/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/clang/lib/Sema/SemaTemplateInstantiate.cpp
@@ -639,15 +639,8 @@ Sema::InstantiatingTemplate::InstantiatingTemplate(
}
Invalid = SemaRef.pushCodeSynthesisContext(Inst);
- if (!Invalid) {
- AlreadyInstantiating =
- !Inst.Entity
- ? false
- : !SemaRef.InstantiatingSpecializations
- .insert({Inst.Entity->getCanonicalDecl(), Inst.Kind})
- .second;
+ if (!Invalid)
atTemplateBegin(SemaRef.TemplateInstCallbacks, SemaRef, Inst);
- }
}
Sema::InstantiatingTemplate::InstantiatingTemplate(
@@ -902,13 +895,6 @@ void Sema::popCodeSynthesisContext() {
void Sema::InstantiatingTemplate::Clear() {
if (!Invalid) {
- if (!AlreadyInstantiating) {
- auto &Active = SemaRef.CodeSynthesisContexts.back();
- if (Active.Entity)
- SemaRef.InstantiatingSpecializations.erase(
- {Active.Entity->getCanonicalDecl(), Active.Kind});
- }
-
atTemplateEnd(SemaRef.TemplateInstCallbacks, SemaRef,
SemaRef.CodeSynthesisContexts.back());
@@ -2864,9 +2850,9 @@ TemplateInstantiator::TransformNestedRequirement(
TemplateArgs, Constraint->getSourceRange(), Satisfaction,
/*TopLevelConceptId=*/nullptr, &NewConstraint);
- assert(!Success || !Trap.hasErrorOccurred() &&
- "Substitution failures must be handled "
- "by CheckConstraintSatisfaction.");
+ assert((!Success || !Trap.hasErrorOccurred()) &&
+ "Substitution failures must be handled "
+ "by CheckConstraintSatisfaction.");
}
if (!Success || Satisfaction.HasSubstitutionFailure())
@@ -3312,17 +3298,20 @@ bool Sema::SubstDefaultArgument(
FunctionDecl *FD = cast<FunctionDecl>(Param->getDeclContext());
Expr *PatternExpr = Param->getUninstantiatedDefaultArg();
+ RecursiveInstGuard AlreadyInstantiating(
+ *this, Param, RecursiveInstGuard::Kind::DefaultArgument);
+ if (AlreadyInstantiating) {
+ Param->setInvalidDecl();
+ return Diag(Param->getBeginLoc(), diag::err_recursive_default_argument)
+ << FD << PatternExpr->getSourceRange();
+ }
+
EnterExpressionEvaluationContext EvalContext(
*this, ExpressionEvaluationContext::PotentiallyEvaluated, Param);
InstantiatingTemplate Inst(*this, Loc, Param, TemplateArgs.getInnermost());
if (Inst.isInvalid())
return true;
- if (Inst.isAlreadyInstantiating()) {
- Diag(Param->getBeginLoc(), diag::err_recursive_default_argument) << FD;
- Param->setInvalidDecl();
- return true;
- }
ExprResult Result;
// C++ [dcl.fct.default]p5:
@@ -3554,12 +3543,26 @@ namespace clang {
}
}
-bool
-Sema::InstantiateClass(SourceLocation PointOfInstantiation,
- CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
- const MultiLevelTemplateArgumentList &TemplateArgs,
- TemplateSpecializationKind TSK,
- bool Complain) {
+bool Sema::InstantiateClass(SourceLocation PointOfInstantiation,
+ CXXRecordDecl *Instantiation,
+ CXXRecordDecl *Pattern,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ TemplateSpecializationKind TSK, bool Complain) {
+#ifndef NDEBUG
+ RecursiveInstGuard AlreadyInstantiating(*this, Instantiation,
+ RecursiveInstGuard::Kind::Template);
+ assert(!AlreadyInstantiating && "should have been caught by caller");
+#endif
+
+ return InstantiateClassImpl(PointOfInstantiation, Instantiation, Pattern,
+ TemplateArgs, TSK, Complain);
+}
+
+bool Sema::InstantiateClassImpl(
+ SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation,
+ CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs,
+ TemplateSpecializationKind TSK, bool Complain) {
+
CXXRecordDecl *PatternDef
= cast_or_null<CXXRecordDecl>(Pattern->getDefinition());
if (DiagnoseUninstantiableTemplate(PointOfInstantiation, Instantiation,
@@ -3596,7 +3599,6 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
InstantiatingTemplate Inst(*this, PointOfInstantiation, Instantiation);
if (Inst.isInvalid())
return true;
- assert(!Inst.isAlreadyInstantiating() && "should have been caught by caller");
PrettyDeclStackTraceEntry CrashInfo(Context, Instantiation, SourceLocation(),
"instantiating class definition");
@@ -3808,6 +3810,12 @@ bool Sema::InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK) {
+#ifndef NDEBUG
+ RecursiveInstGuard AlreadyInstantiating(*this, Instantiation,
+ RecursiveInstGuard::Kind::Template);
+ assert(!AlreadyInstantiating && "should have been caught by caller");
+#endif
+
EnumDecl *PatternDef = Pattern->getDefinition();
if (DiagnoseUninstantiableTemplate(PointOfInstantiation, Instantiation,
Instantiation->getInstantiatedFromMemberEnum(),
@@ -3825,8 +3833,6 @@ bool Sema::InstantiateEnum(SourceLocation PointOfInstantiation,
InstantiatingTemplate Inst(*this, PointOfInstantiation, Instantiation);
if (Inst.isInvalid())
return true;
- if (Inst.isAlreadyInstantiating())
- return false;
PrettyDeclStackTraceEntry CrashInfo(Context, Instantiation, SourceLocation(),
"instantiating enum definition");
@@ -3865,6 +3871,14 @@ bool Sema::InstantiateInClassInitializer(
Pattern->getInClassInitStyle() &&
"pattern and instantiation disagree about init style");
+ RecursiveInstGuard AlreadyInstantiating(*this, Instantiation,
+ RecursiveInstGuard::Kind::Template);
+ if (AlreadyInstantiating)
+ // Error out if we hit an instantiation cycle for this initializer.
+ return Diag(PointOfInstantiation,
+ diag::err_default_member_initializer_cycle)
+ << Instantiation;
+
// Error out if we haven't parsed the initializer of the pattern yet because
// we are waiting for the closing brace of the outer class.
Expr *OldInit = Pattern->getInClassInitializer();
@@ -3883,12 +3897,6 @@ bool Sema::InstantiateInClassInitializer(
InstantiatingTemplate Inst(*this, PointOfInstantiation, Instantiation);
if (Inst.isInvalid())
return true;
- if (Inst.isAlreadyInstantiating()) {
- // Error out if we hit an instantiation cycle for this initializer.
- Diag(PointOfInstantiation, diag::err_default_member_initializer_cycle)
- << Instantiation;
- return true;
- }
PrettyDeclStackTraceEntry CrashInfo(Context, Instantiation, SourceLocation(),
"instantiating default member init");
@@ -3972,8 +3980,6 @@ static ActionResult<CXXRecordDecl *> getPatternForClassTemplateSpecialization(
Sema::InstantiatingTemplate Inst(S, PointOfInstantiation, ClassTemplateSpec);
if (Inst.isInvalid())
return {/*Invalid=*/true};
- if (Inst.isAlreadyInstantiating())
- return {/*Invalid=*/false};
llvm::PointerUnion<ClassTemplateDecl *,
ClassTemplatePartialSpecializationDecl *>
@@ -4136,6 +4142,11 @@ bool Sema::InstantiateClassTemplateSpecialization(
if (ClassTemplateSpec->isInvalidDecl())
return true;
+ Sema::RecursiveInstGuard AlreadyInstantiating(
+ *this, ClassTemplateSpec, Sema::RecursiveInstGuard::Kind::Template);
+ if (AlreadyInstantiating)
+ return false;
+
bool HadAvaibilityWarning =
ShouldDiagnoseAvailabilityOfDecl(ClassTemplateSpec, nullptr, nullptr)
.first != AR_Available;
@@ -4148,7 +4159,7 @@ bool Sema::InstantiateClassTemplateSpecialization(
if (!Pattern.isUsable())
return Pattern.isInvalid();
- bool Err = InstantiateClass(
+ bool Err = InstantiateClassImpl(
PointOfInstantiation, ClassTemplateSpec, Pattern.get(),
getTemplateInstantiationArgs(ClassTemplateSpec), TSK, Complain);
diff --git a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
index 4863b45..28925cc 100644
--- a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -5312,6 +5312,16 @@ void Sema::InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
if (Proto->getExceptionSpecType() != EST_Uninstantiated)
return;
+ RecursiveInstGuard AlreadyInstantiating(
+ *this, Decl, RecursiveInstGuard::Kind::ExceptionSpec);
+ if (AlreadyInstantiating) {
+ // This exception specification indirectly depends on itself. Reject.
+ // FIXME: Corresponding rule in the standard?
+ Diag(PointOfInstantiation, diag::err_exception_spec_cycle) << Decl;
+ UpdateExceptionSpec(Decl, EST_None);
+ return;
+ }
+
InstantiatingTemplate Inst(*this, PointOfInstantiation, Decl,
InstantiatingTemplate::ExceptionSpecification());
if (Inst.isInvalid()) {
@@ -5320,13 +5330,6 @@ void Sema::InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
UpdateExceptionSpec(Decl, EST_None);
return;
}
- if (Inst.isAlreadyInstantiating()) {
- // This exception specification indirectly depends on itself. Reject.
- // FIXME: Corresponding rule in the standard?
- Diag(PointOfInstantiation, diag::err_exception_spec_cycle) << Decl;
- UpdateExceptionSpec(Decl, EST_None);
- return;
- }
// Enter the scope of this instantiation. We don't use
// PushDeclContext because we don't have a scope.
@@ -5386,8 +5389,6 @@ TemplateDeclInstantiator::InitFunctionInstantiation(FunctionDecl *New,
if (ActiveInst.Kind == ActiveInstType::ExplicitTemplateArgumentSubstitution ||
ActiveInst.Kind == ActiveInstType::DeducedTemplateArgumentSubstitution) {
if (isa<FunctionTemplateDecl>(ActiveInst.Entity)) {
- SemaRef.InstantiatingSpecializations.erase(
- {ActiveInst.Entity->getCanonicalDecl(), ActiveInst.Kind});
atTemplateEnd(SemaRef.TemplateInstCallbacks, SemaRef, ActiveInst);
ActiveInst.Kind = ActiveInstType::TemplateInstantiation;
ActiveInst.Entity = New;
@@ -5545,6 +5546,12 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
Function = const_cast<FunctionDecl*>(ExistingDefn);
}
+#ifndef NDEBUG
+ RecursiveInstGuard AlreadyInstantiating(*this, Function,
+ RecursiveInstGuard::Kind::Template);
+ assert(!AlreadyInstantiating && "should have been caught by caller");
+#endif
+
// Find the function body that we'll be substituting.
const FunctionDecl *PatternDecl = Function->getTemplateInstantiationPattern();
assert(PatternDecl && "instantiating a non-template");
@@ -5684,7 +5691,7 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
}
InstantiatingTemplate Inst(*this, PointOfInstantiation, Function);
- if (Inst.isInvalid() || Inst.isAlreadyInstantiating())
+ if (Inst.isInvalid())
return;
PrettyDeclStackTraceEntry CrashInfo(Context, Function, SourceLocation(),
"instantiating function definition");
@@ -6253,6 +6260,11 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
if (TSK == TSK_ExplicitSpecialization)
return;
+ RecursiveInstGuard AlreadyInstantiating(*this, Var,
+ RecursiveInstGuard::Kind::Template);
+ if (AlreadyInstantiating)
+ return;
+
// Find the pattern and the arguments to substitute into it.
VarDecl *PatternDecl = Var->getTemplateInstantiationPattern();
assert(PatternDecl && "no pattern for templated variable");
@@ -6276,7 +6288,7 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
// FIXME: Factor out the duplicated instantiation context setup/tear down
// code here.
InstantiatingTemplate Inst(*this, PointOfInstantiation, Var);
- if (Inst.isInvalid() || Inst.isAlreadyInstantiating())
+ if (Inst.isInvalid())
return;
PrettyDeclStackTraceEntry CrashInfo(Context, Var, SourceLocation(),
"instantiating variable initializer");
@@ -6380,7 +6392,7 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
}
InstantiatingTemplate Inst(*this, PointOfInstantiation, Var);
- if (Inst.isInvalid() || Inst.isAlreadyInstantiating())
+ if (Inst.isInvalid())
return;
PrettyDeclStackTraceEntry CrashInfo(Context, Var, SourceLocation(),
"instantiating variable definition");
diff --git a/clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp
index bf35bee..3ddd659 100644
--- a/clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp
@@ -104,7 +104,7 @@ class RAIIMutexDescriptor {
// this function is called instead of early returning it. To avoid this, a
// bool variable (IdentifierInfoInitialized) is used and the function will
// be run only once.
- const auto &ASTCtx = Call.getState()->getStateManager().getContext();
+ const auto &ASTCtx = Call.getASTContext();
Guard = &ASTCtx.Idents.get(GuardName);
}
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp b/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
index 9d3aeff..2420848 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
@@ -929,7 +929,7 @@ ObjCDeallocChecker::getValueReleasedByNillingOut(const ObjCMethodCall &M,
SVal Arg = M.getArgSVal(0);
ProgramStateRef notNilState, nilState;
std::tie(notNilState, nilState) =
- M.getState()->assume(Arg.castAs<DefinedOrUnknownSVal>());
+ C.getState()->assume(Arg.castAs<DefinedOrUnknownSVal>());
if (!(nilState && !notNilState))
return nullptr;
diff --git a/clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp
index f984caf..227cbfa 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp
@@ -34,7 +34,7 @@ class ObjCSuperDeallocChecker
this, "[super dealloc] should not be called more than once",
categories::CoreFoundationObjectiveC};
- void initIdentifierInfoAndSelectors(ASTContext &Ctx) const;
+ void initIdentifierInfoAndSelectors(const ASTContext &Ctx) const;
bool isSuperDeallocMessage(const ObjCMethodCall &M) const;
@@ -214,8 +214,8 @@ void ObjCSuperDeallocChecker::diagnoseCallArguments(const CallEvent &CE,
}
}
-void
-ObjCSuperDeallocChecker::initIdentifierInfoAndSelectors(ASTContext &Ctx) const {
+void ObjCSuperDeallocChecker::initIdentifierInfoAndSelectors(
+ const ASTContext &Ctx) const {
if (IIdealloc)
return;
@@ -230,7 +230,7 @@ ObjCSuperDeallocChecker::isSuperDeallocMessage(const ObjCMethodCall &M) const {
if (M.getOriginExpr()->getReceiverKind() != ObjCMessageExpr::SuperInstance)
return false;
- ASTContext &Ctx = M.getState()->getStateManager().getContext();
+ const ASTContext &Ctx = M.getASTContext();
initIdentifierInfoAndSelectors(Ctx);
return M.getSelector() == SELdealloc;
diff --git a/clang/lib/StaticAnalyzer/Checkers/StdVariantChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/StdVariantChecker.cpp
index 4fc1c57..db8bbee 100644
--- a/clang/lib/StaticAnalyzer/Checkers/StdVariantChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/StdVariantChecker.cpp
@@ -211,13 +211,13 @@ private:
if (!DefaultType)
return;
- ProgramStateRef State = ConstructorCall->getState();
+ ProgramStateRef State = C.getState();
State = State->set<VariantHeldTypeMap>(ThisMemRegion, *DefaultType);
C.addTransition(State);
}
bool handleStdGetCall(const CallEvent &Call, CheckerContext &C) const {
- ProgramStateRef State = Call.getState();
+ ProgramStateRef State = C.getState();
const auto &ArgType = Call.getArgSVal(0)
.getType(C.getASTContext())
diff --git a/clang/lib/StaticAnalyzer/Checkers/TaggedUnionModeling.h b/clang/lib/StaticAnalyzer/Checkers/TaggedUnionModeling.h
index dec4612..b8fb572 100644
--- a/clang/lib/StaticAnalyzer/Checkers/TaggedUnionModeling.h
+++ b/clang/lib/StaticAnalyzer/Checkers/TaggedUnionModeling.h
@@ -52,7 +52,7 @@ removeInformationStoredForDeadInstances(const CallEvent &Call,
template <class TypeMap>
void handleConstructorAndAssignment(const CallEvent &Call, CheckerContext &C,
SVal ThisSVal) {
- ProgramStateRef State = Call.getState();
+ ProgramStateRef State = C.getState();
if (!State)
return;
diff --git a/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp b/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
index 44c6f9f..8ee4832 100644
--- a/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
+++ b/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
@@ -731,19 +731,22 @@ void CheckerManager::runCheckersForEvalCall(ExplodedNodeSet &Dst,
ExplodedNodeSet checkDst;
NodeBuilder B(Pred, checkDst, Eng.getBuilderContext());
+ ProgramStateRef State = Pred->getState();
+ CallEventRef<> UpdatedCall = Call.cloneWithState(State);
+
// Check if any of the EvalCall callbacks can evaluate the call.
for (const auto &EvalCallChecker : EvalCallCheckers) {
// TODO: Support the situation when the call doesn't correspond
// to any Expr.
ProgramPoint L = ProgramPoint::getProgramPoint(
- Call.getOriginExpr(), ProgramPoint::PostStmtKind,
+ UpdatedCall->getOriginExpr(), ProgramPoint::PostStmtKind,
Pred->getLocationContext(), EvalCallChecker.Checker);
bool evaluated = false;
- { // CheckerContext generates transitions(populates checkDest) on
+    { // CheckerContext generates transitions (populates checkDst) on
// destruction, so introduce the scope to make sure it gets properly
// populated.
CheckerContext C(B, Eng, Pred, L);
- evaluated = EvalCallChecker(Call, C);
+ evaluated = EvalCallChecker(*UpdatedCall, C);
}
#ifndef NDEBUG
if (evaluated && evaluatorChecker) {
@@ -774,7 +777,7 @@ void CheckerManager::runCheckersForEvalCall(ExplodedNodeSet &Dst,
// If none of the checkers evaluated the call, ask ExprEngine to handle it.
if (!evaluatorChecker) {
NodeBuilder B(Pred, Dst, Eng.getBuilderContext());
- Eng.defaultEvalCall(B, Pred, Call, CallOpts);
+ Eng.defaultEvalCall(B, Pred, *UpdatedCall, CallOpts);
}
}
}
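
runCheckersForEvalCall now rebinds the call once to the predecessor's state and hands the same UpdatedCall to every eval::Call checker and, when none of them claims the call, to defaultEvalCall. cloneWithState does not mutate its receiver; it returns a reference-counted copy attached to the given state, roughly:

    ProgramStateRef State = Pred->getState();
    CallEventRef<> UpdatedCall = Call.cloneWithState(State);
    // State-dependent accessors such as UpdatedCall->getArgSVal(0) now agree
    // with Pred; the original Call still carries whatever state it had.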
diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
index 0c491b8..ac6c1d7 100644
--- a/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
@@ -628,6 +628,8 @@ void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred,
ProgramStateRef ExprEngine::finishArgumentConstruction(ProgramStateRef State,
const CallEvent &Call) {
+  // WARNING: The state attached to 'Call' may be obsolete; do not call any
+ // methods that rely on it!
const Expr *E = Call.getOriginExpr();
// FIXME: Constructors to placement arguments of operator new
// are not supported yet.
@@ -653,6 +655,8 @@ ProgramStateRef ExprEngine::finishArgumentConstruction(ProgramStateRef State,
void ExprEngine::finishArgumentConstruction(ExplodedNodeSet &Dst,
ExplodedNode *Pred,
const CallEvent &Call) {
+  // WARNING: The state attached to 'Call' may be obsolete; do not call any
+ // methods that rely on it!
ProgramStateRef State = Pred->getState();
ProgramStateRef CleanedState = finishArgumentConstruction(State, Call);
if (CleanedState == State) {
@@ -670,35 +674,33 @@ void ExprEngine::finishArgumentConstruction(ExplodedNodeSet &Dst,
}
void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
- const CallEvent &Call) {
- // WARNING: At this time, the state attached to 'Call' may be older than the
- // state in 'Pred'. This is a minor optimization since CheckerManager will
- // use an updated CallEvent instance when calling checkers, but if 'Call' is
- // ever used directly in this function all callers should be updated to pass
- // the most recent state. (It is probably not worth doing the work here since
- // for some callers this will not be necessary.)
+ const CallEvent &CallTemplate) {
+ // NOTE: CallTemplate is called a "template" because its attached state may
+ // be obsolete (compared to the state of Pred). The state-dependent methods
+ // of CallEvent should be used only after a `cloneWithState` call that
+ // attaches the up-to-date state to this template object.
// Run any pre-call checks using the generic call interface.
ExplodedNodeSet dstPreVisit;
- getCheckerManager().runCheckersForPreCall(dstPreVisit, Pred,
- Call, *this);
+ getCheckerManager().runCheckersForPreCall(dstPreVisit, Pred, CallTemplate,
+ *this);
// Actually evaluate the function call. We try each of the checkers
// to see if they can evaluate the function call, and get a callback at
// defaultEvalCall if all of them fail.
ExplodedNodeSet dstCallEvaluated;
- getCheckerManager().runCheckersForEvalCall(dstCallEvaluated, dstPreVisit,
- Call, *this, EvalCallOptions());
+ getCheckerManager().runCheckersForEvalCall(
+ dstCallEvaluated, dstPreVisit, CallTemplate, *this, EvalCallOptions());
// If there were other constructors called for object-type arguments
// of this call, clean them up.
ExplodedNodeSet dstArgumentCleanup;
for (ExplodedNode *I : dstCallEvaluated)
- finishArgumentConstruction(dstArgumentCleanup, I, Call);
+ finishArgumentConstruction(dstArgumentCleanup, I, CallTemplate);
ExplodedNodeSet dstPostCall;
getCheckerManager().runCheckersForPostCall(dstPostCall, dstArgumentCleanup,
- Call, *this);
+ CallTemplate, *this);
// Escaping symbols conjured while invalidating the regions above.
// Note that, for inlined calls, the nodes were put back into the worklist,
@@ -708,12 +710,13 @@ void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
// Run pointerEscape callback with the newly conjured symbols.
SmallVector<std::pair<SVal, SVal>, 8> Escaped;
for (ExplodedNode *I : dstPostCall) {
- NodeBuilder B(I, Dst, *currBldrCtx);
ProgramStateRef State = I->getState();
+ CallEventRef<> Call = CallTemplate.cloneWithState(State);
+ NodeBuilder B(I, Dst, *currBldrCtx);
Escaped.clear();
{
unsigned Arg = -1;
- for (const ParmVarDecl *PVD : Call.parameters()) {
+ for (const ParmVarDecl *PVD : Call->parameters()) {
++Arg;
QualType ParamTy = PVD->getType();
if (ParamTy.isNull() ||
@@ -722,13 +725,13 @@ void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
QualType Pointee = ParamTy->getPointeeType();
if (Pointee.isConstQualified() || Pointee->isVoidType())
continue;
- if (const MemRegion *MR = Call.getArgSVal(Arg).getAsRegion())
+ if (const MemRegion *MR = Call->getArgSVal(Arg).getAsRegion())
Escaped.emplace_back(loc::MemRegionVal(MR), State->getSVal(MR, Pointee));
}
}
State = processPointerEscapedOnBind(State, Escaped, I->getLocationContext(),
- PSK_EscapeOutParameters, &Call);
+ PSK_EscapeOutParameters, &*Call);
if (State == I->getState())
Dst.insert(I);
@@ -1212,48 +1215,47 @@ static bool isTrivialObjectAssignment(const CallEvent &Call) {
}
void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
- const CallEvent &CallTemplate,
+ const CallEvent &Call,
const EvalCallOptions &CallOpts) {
// Make sure we have the most recent state attached to the call.
ProgramStateRef State = Pred->getState();
- CallEventRef<> Call = CallTemplate.cloneWithState(State);
// Special-case trivial assignment operators.
- if (isTrivialObjectAssignment(*Call)) {
- performTrivialCopy(Bldr, Pred, *Call);
+ if (isTrivialObjectAssignment(Call)) {
+ performTrivialCopy(Bldr, Pred, Call);
return;
}
// Try to inline the call.
// The origin expression here is just used as a kind of checksum;
// this should still be safe even for CallEvents that don't come from exprs.
- const Expr *E = Call->getOriginExpr();
+ const Expr *E = Call.getOriginExpr();
ProgramStateRef InlinedFailedState = getInlineFailedState(State, E);
if (InlinedFailedState) {
// If we already tried once and failed, make sure we don't retry later.
State = InlinedFailedState;
} else {
- RuntimeDefinition RD = Call->getRuntimeDefinition();
- Call->setForeign(RD.isForeign());
+ RuntimeDefinition RD = Call.getRuntimeDefinition();
+ Call.setForeign(RD.isForeign());
const Decl *D = RD.getDecl();
- if (shouldInlineCall(*Call, D, Pred, CallOpts)) {
+ if (shouldInlineCall(Call, D, Pred, CallOpts)) {
if (RD.mayHaveOtherDefinitions()) {
AnalyzerOptions &Options = getAnalysisManager().options;
// Explore with and without inlining the call.
if (Options.getIPAMode() == IPAK_DynamicDispatchBifurcate) {
- BifurcateCall(RD.getDispatchRegion(), *Call, D, Bldr, Pred);
+ BifurcateCall(RD.getDispatchRegion(), Call, D, Bldr, Pred);
return;
}
// Don't inline if we're not in any dynamic dispatch mode.
if (Options.getIPAMode() != IPAK_DynamicDispatch) {
- conservativeEvalCall(*Call, Bldr, Pred, State);
+ conservativeEvalCall(Call, Bldr, Pred, State);
return;
}
}
- ctuBifurcate(*Call, D, Bldr, Pred, State);
+ ctuBifurcate(Call, D, Bldr, Pred, State);
return;
}
}
@@ -1261,10 +1263,10 @@ void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
// If we can't inline it, clean up the state traits used only if the function
// is inlined.
State = removeStateTraitsUsedForArrayEvaluation(
- State, dyn_cast_or_null<CXXConstructExpr>(E), Call->getLocationContext());
+ State, dyn_cast_or_null<CXXConstructExpr>(E), Call.getLocationContext());
// Also handle the return value and invalidate the regions.
- conservativeEvalCall(*Call, Bldr, Pred, State);
+ conservativeEvalCall(Call, Bldr, Pred, State);
}
void ExprEngine::BifurcateCall(const MemRegion *BifurReg,
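
The evalCall parameter is renamed to CallTemplate to signal that it must be rebound before any state-dependent use; the pointer-escape loop above shows the idiom, cloning once per destination node:

    for (ExplodedNode *I : dstPostCall) {
      ProgramStateRef State = I->getState();
      CallEventRef<> Call = CallTemplate.cloneWithState(State);
      // Only now is Call->getArgSVal(Arg) guaranteed to read I's state.
    }

defaultEvalCall moves in the opposite direction: its caller (runCheckersForEvalCall above) already passes a rebound event, so its local cloneWithState is dropped and the parameter is renamed from CallTemplate back to Call.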
diff --git a/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
index 5a4e805..dad3d0da 100644
--- a/clang/lib/Support/RISCVVIntrinsicUtils.cpp
+++ b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
@@ -654,6 +654,9 @@ PrototypeDescriptor::parsePrototypeDescriptor(
case 'F':
TM |= TypeModifier::Float;
break;
+ case 'Y':
+ TM |= TypeModifier::BFloat;
+ break;
case 'S':
TM |= TypeModifier::LMUL1;
break;
@@ -704,6 +707,8 @@ void RVVType::applyModifier(const PrototypeDescriptor &Transformer) {
ElementBitwidth *= 2;
LMUL.MulLog2LMUL(1);
Scale = LMUL.getScale(ElementBitwidth);
+ if (ScalarType == ScalarTypeKind::BFloat)
+ ScalarType = ScalarTypeKind::Float;
break;
case VectorTypeModifier::Widening4XVector:
ElementBitwidth *= 4;
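
On the RISC-V side, 'Y' is a new prototype-descriptor character selecting a bfloat16 element type, and the 2x-widening transform now demotes a BFloat scalar kind to Float, presumably because a bf16 element doubled in width is an f32 (there is no 32-bit bfloat). Condensed from the two hunks:

    case 'Y':
      TM |= TypeModifier::BFloat; // 'Y' marks a bf16 element type
      break;

    // In applyModifier, Widening2XVector: bf16 widened 2x becomes f32.
    ElementBitwidth *= 2;
    if (ScalarType == ScalarTypeKind::BFloat)
      ScalarType = ScalarTypeKind::Float;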
diff --git a/clang/test/AST/ByteCode/extern.cpp b/clang/test/AST/ByteCode/extern.cpp
index a616269..c321593 100644
--- a/clang/test/AST/ByteCode/extern.cpp
+++ b/clang/test/AST/ByteCode/extern.cpp
@@ -1,9 +1,11 @@
// RUN: %clang_cc1 -fexperimental-new-constant-interpreter -verify=both,expected %s
-// RUN: %clang_cc1 -verify=both,ref %s
-
+// RUN: %clang_cc1 -verify=both,ref %s
// both-no-diagnostics
+extern const double Num;
+extern const double Num = 12;
+
extern const int E;
constexpr int getE() {
return E;
diff --git a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1.c b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1.c
index 19e5243..1ed59c6c 100644
--- a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1.c
+++ b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1.c
@@ -24,12 +24,12 @@
// CHECK-LABEL: @test_svld1_s8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z13test_svld1_s8u10__SVBool_tPKa(
// CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svld1_s8(svbool_t pg, const int8_t *base) MODE_ATTR
@@ -40,13 +40,13 @@ svint8_t test_svld1_s8(svbool_t pg, const int8_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer)
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z14test_svld1_s16u10__SVBool_tPKs(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svld1_s16(svbool_t pg, const int16_t *base) MODE_ATTR
@@ -57,13 +57,13 @@ svint16_t test_svld1_s16(svbool_t pg, const int16_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer)
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z14test_svld1_s32u10__SVBool_tPKi(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svld1_s32(svbool_t pg, const int32_t *base) MODE_ATTR
@@ -74,13 +74,13 @@ svint32_t test_svld1_s32(svbool_t pg, const int32_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer)
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z14test_svld1_s64u10__SVBool_tPKl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svint64_t test_svld1_s64(svbool_t pg, const int64_t *base) MODE_ATTR
@@ -90,12 +90,12 @@ svint64_t test_svld1_s64(svbool_t pg, const int64_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1_u8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z13test_svld1_u8u10__SVBool_tPKh(
// CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svuint8_t test_svld1_u8(svbool_t pg, const uint8_t *base) MODE_ATTR
@@ -106,13 +106,13 @@ svuint8_t test_svld1_u8(svbool_t pg, const uint8_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer)
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z14test_svld1_u16u10__SVBool_tPKt(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svuint16_t test_svld1_u16(svbool_t pg, const uint16_t *base) MODE_ATTR
@@ -123,13 +123,13 @@ svuint16_t test_svld1_u16(svbool_t pg, const uint16_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer)
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z14test_svld1_u32u10__SVBool_tPKj(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svuint32_t test_svld1_u32(svbool_t pg, const uint32_t *base) MODE_ATTR
@@ -140,13 +140,13 @@ svuint32_t test_svld1_u32(svbool_t pg, const uint32_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer)
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z14test_svld1_u64u10__SVBool_tPKm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svuint64_t test_svld1_u64(svbool_t pg, const uint64_t *base) MODE_ATTR
@@ -157,13 +157,13 @@ svuint64_t test_svld1_u64(svbool_t pg, const uint64_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x half> @llvm.masked.load.nxv8f16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x half> @llvm.masked.load.nxv8f16.p0(ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> zeroinitializer)
// CHECK-NEXT: ret <vscale x 8 x half> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z14test_svld1_f16u10__SVBool_tPKDh(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x half> @llvm.masked.load.nxv8f16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x half> @llvm.masked.load.nxv8f16.p0(ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 8 x half> [[TMP1]]
//
svfloat16_t test_svld1_f16(svbool_t pg, const float16_t *base) MODE_ATTR
@@ -174,13 +174,13 @@ svfloat16_t test_svld1_f16(svbool_t pg, const float16_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> zeroinitializer)
// CHECK-NEXT: ret <vscale x 4 x float> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z14test_svld1_f32u10__SVBool_tPKf(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 4 x float> [[TMP1]]
//
svfloat32_t test_svld1_f32(svbool_t pg, const float32_t *base) MODE_ATTR
@@ -191,13 +191,13 @@ svfloat32_t test_svld1_f32(svbool_t pg, const float32_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> zeroinitializer)
// CHECK-NEXT: ret <vscale x 2 x double> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z14test_svld1_f64u10__SVBool_tPKd(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 2 x double> [[TMP1]]
//
svfloat64_t test_svld1_f64(svbool_t pg, const float64_t *base) MODE_ATTR
@@ -207,12 +207,12 @@ svfloat64_t test_svld1_f64(svbool_t pg, const float64_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1_mf8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z14test_svld1_mf8u10__SVBool_tPKu6__mfp8(
// CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svmfloat8_t test_svld1_mf8(svbool_t pg, const mfloat8_t *base) MODE_ATTR
@@ -226,7 +226,7 @@ svmfloat8_t test_svld1_mf8(svbool_t pg, const mfloat8_t *base) MODE_ATTR
// CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]]
// CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr align 1 [[TMP2]], <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP3]]
//
// CPP-CHECK-LABEL: @_Z18test_svld1_vnum_s8u10__SVBool_tPKal(
@@ -235,7 +235,7 @@ svmfloat8_t test_svld1_mf8(svbool_t pg, const mfloat8_t *base) MODE_ATTR
// CPP-CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]]
// CPP-CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr align 1 [[TMP2]], <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP3]]
//
svint8_t test_svld1_vnum_s8(svbool_t pg, const int8_t *base, int64_t vnum) MODE_ATTR
@@ -250,7 +250,7 @@ svint8_t test_svld1_vnum_s8(svbool_t pg, const int8_t *base, int64_t vnum) MODE_
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer)
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP4]]
//
// CPP-CHECK-LABEL: @_Z19test_svld1_vnum_s16u10__SVBool_tPKsl(
@@ -260,7 +260,7 @@ svint8_t test_svld1_vnum_s8(svbool_t pg, const int8_t *base, int64_t vnum) MODE_
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP4]]
//
svint16_t test_svld1_vnum_s16(svbool_t pg, const int16_t *base, int64_t vnum) MODE_ATTR
@@ -275,7 +275,7 @@ svint16_t test_svld1_vnum_s16(svbool_t pg, const int16_t *base, int64_t vnum) MO
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer)
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP4]]
//
// CPP-CHECK-LABEL: @_Z19test_svld1_vnum_s32u10__SVBool_tPKil(
@@ -285,7 +285,7 @@ svint16_t test_svld1_vnum_s16(svbool_t pg, const int16_t *base, int64_t vnum) MO
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP4]]
//
svint32_t test_svld1_vnum_s32(svbool_t pg, const int32_t *base, int64_t vnum) MODE_ATTR
@@ -300,7 +300,7 @@ svint32_t test_svld1_vnum_s32(svbool_t pg, const int32_t *base, int64_t vnum) MO
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer)
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP4]]
//
// CPP-CHECK-LABEL: @_Z19test_svld1_vnum_s64u10__SVBool_tPKll(
@@ -310,7 +310,7 @@ svint32_t test_svld1_vnum_s32(svbool_t pg, const int32_t *base, int64_t vnum) MO
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP4]]
//
svint64_t test_svld1_vnum_s64(svbool_t pg, const int64_t *base, int64_t vnum) MODE_ATTR
@@ -324,7 +324,7 @@ svint64_t test_svld1_vnum_s64(svbool_t pg, const int64_t *base, int64_t vnum) MO
// CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]]
// CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr align 1 [[TMP2]], <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP3]]
//
// CPP-CHECK-LABEL: @_Z18test_svld1_vnum_u8u10__SVBool_tPKhl(
@@ -333,7 +333,7 @@ svint64_t test_svld1_vnum_s64(svbool_t pg, const int64_t *base, int64_t vnum) MO
// CPP-CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]]
// CPP-CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr align 1 [[TMP2]], <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP3]]
//
svuint8_t test_svld1_vnum_u8(svbool_t pg, const uint8_t *base, int64_t vnum) MODE_ATTR
@@ -348,7 +348,7 @@ svuint8_t test_svld1_vnum_u8(svbool_t pg, const uint8_t *base, int64_t vnum) MOD
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer)
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP4]]
//
// CPP-CHECK-LABEL: @_Z19test_svld1_vnum_u16u10__SVBool_tPKtl(
@@ -358,7 +358,7 @@ svuint8_t test_svld1_vnum_u8(svbool_t pg, const uint8_t *base, int64_t vnum) MOD
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP4]]
//
svuint16_t test_svld1_vnum_u16(svbool_t pg, const uint16_t *base, int64_t vnum) MODE_ATTR
@@ -373,7 +373,7 @@ svuint16_t test_svld1_vnum_u16(svbool_t pg, const uint16_t *base, int64_t vnum)
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer)
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP4]]
//
// CPP-CHECK-LABEL: @_Z19test_svld1_vnum_u32u10__SVBool_tPKjl(
@@ -383,7 +383,7 @@ svuint16_t test_svld1_vnum_u16(svbool_t pg, const uint16_t *base, int64_t vnum)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP4]]
//
svuint32_t test_svld1_vnum_u32(svbool_t pg, const uint32_t *base, int64_t vnum) MODE_ATTR
@@ -398,7 +398,7 @@ svuint32_t test_svld1_vnum_u32(svbool_t pg, const uint32_t *base, int64_t vnum)
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer)
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP4]]
//
// CPP-CHECK-LABEL: @_Z19test_svld1_vnum_u64u10__SVBool_tPKml(
@@ -408,7 +408,7 @@ svuint32_t test_svld1_vnum_u32(svbool_t pg, const uint32_t *base, int64_t vnum)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP4]]
//
svuint64_t test_svld1_vnum_u64(svbool_t pg, const uint64_t *base, int64_t vnum) MODE_ATTR
@@ -423,7 +423,7 @@ svuint64_t test_svld1_vnum_u64(svbool_t pg, const uint64_t *base, int64_t vnum)
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x half> @llvm.masked.load.nxv8f16.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x half> @llvm.masked.load.nxv8f16.p0(ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> zeroinitializer)
// CHECK-NEXT: ret <vscale x 8 x half> [[TMP4]]
//
// CPP-CHECK-LABEL: @_Z19test_svld1_vnum_f16u10__SVBool_tPKDhl(
@@ -433,7 +433,7 @@ svuint64_t test_svld1_vnum_u64(svbool_t pg, const uint64_t *base, int64_t vnum)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x half> @llvm.masked.load.nxv8f16.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x half> @llvm.masked.load.nxv8f16.p0(ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 8 x half> [[TMP4]]
//
svfloat16_t test_svld1_vnum_f16(svbool_t pg, const float16_t *base, int64_t vnum) MODE_ATTR
@@ -448,7 +448,7 @@ svfloat16_t test_svld1_vnum_f16(svbool_t pg, const float16_t *base, int64_t vnum
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> zeroinitializer)
// CHECK-NEXT: ret <vscale x 4 x float> [[TMP4]]
//
// CPP-CHECK-LABEL: @_Z19test_svld1_vnum_f32u10__SVBool_tPKfl(
@@ -458,7 +458,7 @@ svfloat16_t test_svld1_vnum_f16(svbool_t pg, const float16_t *base, int64_t vnum
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 4 x float> [[TMP4]]
//
svfloat32_t test_svld1_vnum_f32(svbool_t pg, const float32_t *base, int64_t vnum) MODE_ATTR
@@ -473,7 +473,7 @@ svfloat32_t test_svld1_vnum_f32(svbool_t pg, const float32_t *base, int64_t vnum
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> zeroinitializer)
// CHECK-NEXT: ret <vscale x 2 x double> [[TMP4]]
//
// CPP-CHECK-LABEL: @_Z19test_svld1_vnum_f64u10__SVBool_tPKdl(
@@ -483,7 +483,7 @@ svfloat32_t test_svld1_vnum_f32(svbool_t pg, const float32_t *base, int64_t vnum
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 2 x double> [[TMP4]]
//
svfloat64_t test_svld1_vnum_f64(svbool_t pg, const float64_t *base, int64_t vnum) MODE_ATTR
@@ -497,7 +497,7 @@ svfloat64_t test_svld1_vnum_f64(svbool_t pg, const float64_t *base, int64_t vnum
// CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]]
// CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr align 1 [[TMP2]], <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP3]]
//
// CPP-CHECK-LABEL: @_Z19test_svld1_vnum_mf8u10__SVBool_tPKu6__mfp8l(
@@ -506,7 +506,7 @@ svfloat64_t test_svld1_vnum_f64(svbool_t pg, const float64_t *base, int64_t vnum
// CPP-CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]]
// CPP-CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr align 1 [[TMP2]], <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP3]]
//
svmfloat8_t test_svld1_vnum_mf8(svbool_t pg, const mfloat8_t *base, int64_t vnum) MODE_ATTR
@@ -1205,13 +1205,13 @@ svfloat64_t test_svld1_gather_u64base_index_f64(svbool_t pg, svuint64_t bases, i
// CHECK-LABEL: @test_svld1_bf16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x bfloat> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16.p0(ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x bfloat> zeroinitializer)
// CHECK-NEXT: ret <vscale x 8 x bfloat> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z15test_svld1_bf16u10__SVBool_tPKu6__bf16(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x bfloat> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16.p0(ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x bfloat> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 8 x bfloat> [[TMP1]]
//
svbfloat16_t test_svld1_bf16(svbool_t pg, const bfloat16_t *base) MODE_ATTR
@@ -1226,7 +1226,7 @@ svbfloat16_t test_svld1_bf16(svbool_t pg, const bfloat16_t *base) MODE_ATTR
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x bfloat> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16.p0(ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x bfloat> zeroinitializer)
// CHECK-NEXT: ret <vscale x 8 x bfloat> [[TMP4]]
//
// CPP-CHECK-LABEL: @_Z20test_svld1_vnum_bf16u10__SVBool_tPKu6__bf16l(
@@ -1236,7 +1236,7 @@ svbfloat16_t test_svld1_bf16(svbool_t pg, const bfloat16_t *base) MODE_ATTR
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x bfloat> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16.p0(ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x bfloat> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 8 x bfloat> [[TMP4]]
//
svbfloat16_t test_svld1_vnum_bf16(svbool_t pg, const bfloat16_t *base, int64_t vnum) MODE_ATTR
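
The CHECK-line churn in this and the following SVE tests tracks a change in how llvm.masked.load encodes alignment: the separate i32 immarg is gone, and the alignment now rides on the pointer operand as an align parameter attribute ("ptr align 1 [[BASE]]" instead of "ptr [[BASE]], i32 1"). A hedged sketch of emitting the new form, assuming the current IRBuilder::CreateMaskedLoad signature:

    // Emits: call <vecty> @llvm.masked.load.*(ptr align 1 %p, <mask>, <passthru>)
    llvm::Value *emitMaskedLoad(llvm::IRBuilder<> &B, llvm::Type *VecTy,
                                llvm::Value *Ptr, llvm::Value *Mask,
                                llvm::Value *PassThru) {
      return B.CreateMaskedLoad(VecTy, Ptr, llvm::Align(1), Mask, PassThru);
    }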
diff --git a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1sb.c b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1sb.c
index 2757f28..eb40da2 100644
--- a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1sb.c
+++ b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1sb.c
@@ -25,14 +25,14 @@
// CHECK-LABEL: @test_svld1sb_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 8 x i8> [[TMP1]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1sb_s16u10__SVBool_tPKa(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 8 x i8> [[TMP1]] to <vscale x 8 x i16>
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
@@ -44,14 +44,14 @@ svint16_t test_svld1sb_s16(svbool_t pg, const int8_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1sb_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i8> [[TMP1]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1sb_s32u10__SVBool_tPKa(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i8> [[TMP1]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
@@ -63,14 +63,14 @@ svint32_t test_svld1sb_s32(svbool_t pg, const int8_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1sb_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i8> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1sb_s64u10__SVBool_tPKa(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i8> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
@@ -82,14 +82,14 @@ svint64_t test_svld1sb_s64(svbool_t pg, const int8_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1sb_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 8 x i8> [[TMP1]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1sb_u16u10__SVBool_tPKa(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 8 x i8> [[TMP1]] to <vscale x 8 x i16>
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
@@ -101,14 +101,14 @@ svuint16_t test_svld1sb_u16(svbool_t pg, const int8_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1sb_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i8> [[TMP1]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1sb_u32u10__SVBool_tPKa(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i8> [[TMP1]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
@@ -120,14 +120,14 @@ svuint32_t test_svld1sb_u32(svbool_t pg, const int8_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1sb_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i8> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1sb_u64u10__SVBool_tPKa(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i8> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
@@ -143,7 +143,7 @@ svuint64_t test_svld1sb_u64(svbool_t pg, const int8_t *base) MODE_ATTR
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 8 x i8> [[TMP4]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP5]]
//
@@ -154,7 +154,7 @@ svuint64_t test_svld1sb_u64(svbool_t pg, const int8_t *base) MODE_ATTR
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 8 x i8> [[TMP4]] to <vscale x 8 x i16>
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP5]]
//
@@ -170,7 +170,7 @@ svint16_t test_svld1sb_vnum_s16(svbool_t pg, const int8_t *base, int64_t vnum) M
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 4 x i8> [[TMP4]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
//
@@ -181,7 +181,7 @@ svint16_t test_svld1sb_vnum_s16(svbool_t pg, const int8_t *base, int64_t vnum) M
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 4 x i8> [[TMP4]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
//
@@ -197,7 +197,7 @@ svint32_t test_svld1sb_vnum_s32(svbool_t pg, const int8_t *base, int64_t vnum) M
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 1
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 2 x i8> [[TMP4]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
@@ -208,7 +208,7 @@ svint32_t test_svld1sb_vnum_s32(svbool_t pg, const int8_t *base, int64_t vnum) M
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 1
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 2 x i8> [[TMP4]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
@@ -224,7 +224,7 @@ svint64_t test_svld1sb_vnum_s64(svbool_t pg, const int8_t *base, int64_t vnum) M
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 8 x i8> [[TMP4]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP5]]
//
@@ -235,7 +235,7 @@ svint64_t test_svld1sb_vnum_s64(svbool_t pg, const int8_t *base, int64_t vnum) M
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 8 x i8> [[TMP4]] to <vscale x 8 x i16>
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP5]]
//
@@ -251,7 +251,7 @@ svuint16_t test_svld1sb_vnum_u16(svbool_t pg, const int8_t *base, int64_t vnum)
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 4 x i8> [[TMP4]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
//
@@ -262,7 +262,7 @@ svuint16_t test_svld1sb_vnum_u16(svbool_t pg, const int8_t *base, int64_t vnum)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 4 x i8> [[TMP4]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
//
@@ -278,7 +278,7 @@ svuint32_t test_svld1sb_vnum_u32(svbool_t pg, const int8_t *base, int64_t vnum)
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 1
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 2 x i8> [[TMP4]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
@@ -289,7 +289,7 @@ svuint32_t test_svld1sb_vnum_u32(svbool_t pg, const int8_t *base, int64_t vnum)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 1
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 2 x i8> [[TMP4]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
diff --git a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1sh.c b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1sh.c
index dbc762f..e1cbb53 100644
--- a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1sh.c
+++ b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1sh.c
@@ -25,14 +25,14 @@
// CHECK-LABEL: @test_svld1sh_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1sh_s32u10__SVBool_tPKs(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
@@ -44,14 +44,14 @@ svint32_t test_svld1sh_s32(svbool_t pg, const int16_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1sh_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1sh_s64u10__SVBool_tPKs(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
@@ -63,14 +63,14 @@ svint64_t test_svld1sh_s64(svbool_t pg, const int16_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1sh_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1sh_u32u10__SVBool_tPKs(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
@@ -82,14 +82,14 @@ svuint32_t test_svld1sh_u32(svbool_t pg, const int16_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1sh_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1sh_u64u10__SVBool_tPKs(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
@@ -105,7 +105,7 @@ svuint64_t test_svld1sh_u64(svbool_t pg, const int16_t *base) MODE_ATTR
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 4 x i16> [[TMP4]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
//
@@ -116,7 +116,7 @@ svuint64_t test_svld1sh_u64(svbool_t pg, const int16_t *base) MODE_ATTR
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 4 x i16> [[TMP4]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
//
@@ -132,7 +132,7 @@ svint32_t test_svld1sh_vnum_s32(svbool_t pg, const int16_t *base, int64_t vnum)
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 2 x i16> [[TMP4]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
@@ -143,7 +143,7 @@ svint32_t test_svld1sh_vnum_s32(svbool_t pg, const int16_t *base, int64_t vnum)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 2 x i16> [[TMP4]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
@@ -159,7 +159,7 @@ svint64_t test_svld1sh_vnum_s64(svbool_t pg, const int16_t *base, int64_t vnum)
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 4 x i16> [[TMP4]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
//
@@ -170,7 +170,7 @@ svint64_t test_svld1sh_vnum_s64(svbool_t pg, const int16_t *base, int64_t vnum)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 4 x i16> [[TMP4]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
//
@@ -186,7 +186,7 @@ svuint32_t test_svld1sh_vnum_u32(svbool_t pg, const int16_t *base, int64_t vnum)
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 2 x i16> [[TMP4]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
@@ -197,7 +197,7 @@ svuint32_t test_svld1sh_vnum_u32(svbool_t pg, const int16_t *base, int64_t vnum)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 2 x i16> [[TMP4]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
diff --git a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1sw.c b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1sw.c
index 575d214..14ee095 100644
--- a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1sw.c
+++ b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1sw.c
@@ -25,14 +25,14 @@
// CHECK-LABEL: @test_svld1sw_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1sw_s64u10__SVBool_tPKi(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
@@ -44,14 +44,14 @@ svint64_t test_svld1sw_s64(svbool_t pg, const int32_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1sw_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1sw_u64u10__SVBool_tPKi(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
@@ -67,7 +67,7 @@ svuint64_t test_svld1sw_u64(svbool_t pg, const int32_t *base) MODE_ATTR
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 2 x i32> [[TMP4]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
@@ -78,7 +78,7 @@ svuint64_t test_svld1sw_u64(svbool_t pg, const int32_t *base) MODE_ATTR
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 2 x i32> [[TMP4]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
@@ -94,7 +94,7 @@ svint64_t test_svld1sw_vnum_s64(svbool_t pg, const int32_t *base, int64_t vnum)
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 2 x i32> [[TMP4]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
@@ -105,7 +105,7 @@ svint64_t test_svld1sw_vnum_s64(svbool_t pg, const int32_t *base, int64_t vnum)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 2 x i32> [[TMP4]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
diff --git a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1ub.c b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1ub.c
index 07e8815..3e0d2827 100644
--- a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1ub.c
+++ b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1ub.c
@@ -25,14 +25,14 @@
// CHECK-LABEL: @test_svld1ub_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 8 x i8> [[TMP1]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1ub_s16u10__SVBool_tPKh(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 8 x i8> [[TMP1]] to <vscale x 8 x i16>
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
@@ -44,14 +44,14 @@ svint16_t test_svld1ub_s16(svbool_t pg, const uint8_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1ub_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 4 x i8> [[TMP1]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1ub_s32u10__SVBool_tPKh(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 4 x i8> [[TMP1]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
@@ -63,14 +63,14 @@ svint32_t test_svld1ub_s32(svbool_t pg, const uint8_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1ub_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 2 x i8> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1ub_s64u10__SVBool_tPKh(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 2 x i8> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
@@ -82,14 +82,14 @@ svint64_t test_svld1ub_s64(svbool_t pg, const uint8_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1ub_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 8 x i8> [[TMP1]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1ub_u16u10__SVBool_tPKh(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 8 x i8> [[TMP1]] to <vscale x 8 x i16>
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
@@ -101,14 +101,14 @@ svuint16_t test_svld1ub_u16(svbool_t pg, const uint8_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1ub_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 4 x i8> [[TMP1]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1ub_u32u10__SVBool_tPKh(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 4 x i8> [[TMP1]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
@@ -120,14 +120,14 @@ svuint32_t test_svld1ub_u32(svbool_t pg, const uint8_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1ub_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 2 x i8> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1ub_u64u10__SVBool_tPKh(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 2 x i8> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
@@ -143,7 +143,7 @@ svuint64_t test_svld1ub_u64(svbool_t pg, const uint8_t *base) MODE_ATTR
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 8 x i8> [[TMP4]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP5]]
//
@@ -154,7 +154,7 @@ svuint64_t test_svld1ub_u64(svbool_t pg, const uint8_t *base) MODE_ATTR
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 8 x i8> [[TMP4]] to <vscale x 8 x i16>
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP5]]
//
@@ -170,7 +170,7 @@ svint16_t test_svld1ub_vnum_s16(svbool_t pg, const uint8_t *base, int64_t vnum)
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 4 x i8> [[TMP4]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
//
@@ -181,7 +181,7 @@ svint16_t test_svld1ub_vnum_s16(svbool_t pg, const uint8_t *base, int64_t vnum)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 4 x i8> [[TMP4]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
//
@@ -197,7 +197,7 @@ svint32_t test_svld1ub_vnum_s32(svbool_t pg, const uint8_t *base, int64_t vnum)
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 1
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 2 x i8> [[TMP4]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
@@ -208,7 +208,7 @@ svint32_t test_svld1ub_vnum_s32(svbool_t pg, const uint8_t *base, int64_t vnum)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 1
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 2 x i8> [[TMP4]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
@@ -224,7 +224,7 @@ svint64_t test_svld1ub_vnum_s64(svbool_t pg, const uint8_t *base, int64_t vnum)
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 8 x i8> [[TMP4]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP5]]
//
@@ -235,7 +235,7 @@ svint64_t test_svld1ub_vnum_s64(svbool_t pg, const uint8_t *base, int64_t vnum)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 8 x i8> [[TMP4]] to <vscale x 8 x i16>
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP5]]
//
@@ -251,7 +251,7 @@ svuint16_t test_svld1ub_vnum_u16(svbool_t pg, const uint8_t *base, int64_t vnum)
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 4 x i8> [[TMP4]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
//
@@ -262,7 +262,7 @@ svuint16_t test_svld1ub_vnum_u16(svbool_t pg, const uint8_t *base, int64_t vnum)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 4 x i8> [[TMP4]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
//
@@ -278,7 +278,7 @@ svuint32_t test_svld1ub_vnum_u32(svbool_t pg, const uint8_t *base, int64_t vnum)
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 1
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 2 x i8> [[TMP4]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
@@ -289,7 +289,7 @@ svuint32_t test_svld1ub_vnum_u32(svbool_t pg, const uint8_t *base, int64_t vnum)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 1
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 2 x i8> [[TMP4]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
diff --git a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1uh.c b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1uh.c
index 6d91c1e..18dfc082 100644
--- a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1uh.c
+++ b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1uh.c
@@ -25,14 +25,14 @@
// CHECK-LABEL: @test_svld1uh_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1uh_s32u10__SVBool_tPKt(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
@@ -44,14 +44,14 @@ svint32_t test_svld1uh_s32(svbool_t pg, const uint16_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1uh_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1uh_s64u10__SVBool_tPKt(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
@@ -63,14 +63,14 @@ svint64_t test_svld1uh_s64(svbool_t pg, const uint16_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1uh_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1uh_u32u10__SVBool_tPKt(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
@@ -82,14 +82,14 @@ svuint32_t test_svld1uh_u32(svbool_t pg, const uint16_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1uh_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1uh_u64u10__SVBool_tPKt(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
@@ -105,7 +105,7 @@ svuint64_t test_svld1uh_u64(svbool_t pg, const uint16_t *base) MODE_ATTR
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 4 x i16> [[TMP4]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
//
@@ -116,7 +116,7 @@ svuint64_t test_svld1uh_u64(svbool_t pg, const uint16_t *base) MODE_ATTR
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 4 x i16> [[TMP4]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
//
@@ -132,7 +132,7 @@ svint32_t test_svld1uh_vnum_s32(svbool_t pg, const uint16_t *base, int64_t vnum)
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 2 x i16> [[TMP4]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
@@ -143,7 +143,7 @@ svint32_t test_svld1uh_vnum_s32(svbool_t pg, const uint16_t *base, int64_t vnum)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 2 x i16> [[TMP4]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
@@ -159,7 +159,7 @@ svint64_t test_svld1uh_vnum_s64(svbool_t pg, const uint16_t *base, int64_t vnum)
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 4 x i16> [[TMP4]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
//
@@ -170,7 +170,7 @@ svint64_t test_svld1uh_vnum_s64(svbool_t pg, const uint16_t *base, int64_t vnum)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 4 x i16> [[TMP4]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
//
@@ -186,7 +186,7 @@ svuint32_t test_svld1uh_vnum_u32(svbool_t pg, const uint16_t *base, int64_t vnum
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 2 x i16> [[TMP4]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
@@ -197,7 +197,7 @@ svuint32_t test_svld1uh_vnum_u32(svbool_t pg, const uint16_t *base, int64_t vnum
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 2 x i16> [[TMP4]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
diff --git a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1uw.c b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1uw.c
index 7be2398..62637ff 100644
--- a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1uw.c
+++ b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1uw.c
@@ -25,14 +25,14 @@
// CHECK-LABEL: @test_svld1uw_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1uw_s64u10__SVBool_tPKj(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
@@ -44,14 +44,14 @@ svint64_t test_svld1uw_s64(svbool_t pg, const uint32_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1uw_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1uw_u64u10__SVBool_tPKj(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
@@ -67,7 +67,7 @@ svuint64_t test_svld1uw_u64(svbool_t pg, const uint32_t *base) MODE_ATTR
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 2 x i32> [[TMP4]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
@@ -78,7 +78,7 @@ svuint64_t test_svld1uw_u64(svbool_t pg, const uint32_t *base) MODE_ATTR
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 2 x i32> [[TMP4]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
@@ -94,7 +94,7 @@ svint64_t test_svld1uw_vnum_s64(svbool_t pg, const uint32_t *base, int64_t vnum)
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 2 x i32> [[TMP4]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
@@ -105,7 +105,7 @@ svint64_t test_svld1uw_vnum_s64(svbool_t pg, const uint32_t *base, int64_t vnum)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 2 x i32> [[TMP4]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
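The hunks above and below apply one mechanical rewrite throughout these autogenerated SVE tests: the @llvm.masked.load / @llvm.masked.store intrinsics no longer take the alignment as a separate i32 immediate argument; the alignment is instead carried as an align parameter attribute on the pointer operand, so the argument list shrinks by one. A minimal before/after sketch in LLVM IR, with illustrative value names taken from the hunks above:

; old form: alignment passed as the second argument
%v = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr %base, i32 1, <vscale x 4 x i1> %pg, <vscale x 4 x i16> zeroinitializer)
; new form: alignment expressed as a parameter attribute on the pointer
%v = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr align 1 %base, <vscale x 4 x i1> %pg, <vscale x 4 x i16> zeroinitializer)

The masked stores in the files below get the matching change: "ptr [[BASE]], i32 1" becomes "ptr align 1 [[BASE]]", with the mask moving up one argument position.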
diff --git a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1.c b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1.c
index 56f8c32..4d0005e 100644
--- a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1.c
+++ b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1.c
@@ -24,12 +24,12 @@
// CHECK-LABEL: @test_svst1_s8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z13test_svst1_s8u10__SVBool_tPau10__SVInt8_t(
// CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_s8(svbool_t pg, int8_t *base, svint8_t data) MODE_ATTR
@@ -40,13 +40,13 @@ void test_svst1_s8(svbool_t pg, int8_t *base, svint8_t data) MODE_ATTR
// CHECK-LABEL: @test_svst1_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z14test_svst1_s16u10__SVBool_tPsu11__SVInt16_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_s16(svbool_t pg, int16_t *base, svint16_t data) MODE_ATTR
@@ -57,13 +57,13 @@ void test_svst1_s16(svbool_t pg, int16_t *base, svint16_t data) MODE_ATTR
// CHECK-LABEL: @test_svst1_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z14test_svst1_s32u10__SVBool_tPiu11__SVInt32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_s32(svbool_t pg, int32_t *base, svint32_t data) MODE_ATTR
@@ -74,13 +74,13 @@ void test_svst1_s32(svbool_t pg, int32_t *base, svint32_t data) MODE_ATTR
// CHECK-LABEL: @test_svst1_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z14test_svst1_s64u10__SVBool_tPlu11__SVInt64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_s64(svbool_t pg, int64_t *base, svint64_t data) MODE_ATTR
@@ -90,12 +90,12 @@ void test_svst1_s64(svbool_t pg, int64_t *base, svint64_t data) MODE_ATTR
// CHECK-LABEL: @test_svst1_u8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z13test_svst1_u8u10__SVBool_tPhu11__SVUint8_t(
// CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_u8(svbool_t pg, uint8_t *base, svuint8_t data) MODE_ATTR
@@ -106,13 +106,13 @@ void test_svst1_u8(svbool_t pg, uint8_t *base, svuint8_t data) MODE_ATTR
// CHECK-LABEL: @test_svst1_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z14test_svst1_u16u10__SVBool_tPtu12__SVUint16_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_u16(svbool_t pg, uint16_t *base, svuint16_t data) MODE_ATTR
@@ -123,13 +123,13 @@ void test_svst1_u16(svbool_t pg, uint16_t *base, svuint16_t data) MODE_ATTR
// CHECK-LABEL: @test_svst1_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z14test_svst1_u32u10__SVBool_tPju12__SVUint32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_u32(svbool_t pg, uint32_t *base, svuint32_t data) MODE_ATTR
@@ -140,13 +140,13 @@ void test_svst1_u32(svbool_t pg, uint32_t *base, svuint32_t data) MODE_ATTR
// CHECK-LABEL: @test_svst1_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z14test_svst1_u64u10__SVBool_tPmu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_u64(svbool_t pg, uint64_t *base, svuint64_t data) MODE_ATTR
@@ -157,13 +157,13 @@ void test_svst1_u64(svbool_t pg, uint64_t *base, svuint64_t data) MODE_ATTR
// CHECK-LABEL: @test_svst1_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv8f16.p0(<vscale x 8 x half> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv8f16.p0(<vscale x 8 x half> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z14test_svst1_f16u10__SVBool_tPDhu13__SVFloat16_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv8f16.p0(<vscale x 8 x half> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv8f16.p0(<vscale x 8 x half> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_f16(svbool_t pg, float16_t *base, svfloat16_t data) MODE_ATTR
@@ -174,13 +174,13 @@ void test_svst1_f16(svbool_t pg, float16_t *base, svfloat16_t data) MODE_ATTR
// CHECK-LABEL: @test_svst1_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z14test_svst1_f32u10__SVBool_tPfu13__SVFloat32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_f32(svbool_t pg, float32_t *base, svfloat32_t data) MODE_ATTR
@@ -191,13 +191,13 @@ void test_svst1_f32(svbool_t pg, float32_t *base, svfloat32_t data) MODE_ATTR
// CHECK-LABEL: @test_svst1_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z14test_svst1_f64u10__SVBool_tPdu13__SVFloat64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_f64(svbool_t pg, float64_t *base, svfloat64_t data) MODE_ATTR
@@ -207,12 +207,12 @@ void test_svst1_f64(svbool_t pg, float64_t *base, svfloat64_t data) MODE_ATTR
// CHECK-LABEL: @test_svst1_mf8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z14test_svst1_mf8u10__SVBool_tPu6__mfp8u13__SVMfloat8_t(
// CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_mf8(svbool_t pg, mfloat8_t *base, svmfloat8_t data) MODE_ATTR
@@ -226,7 +226,7 @@ void test_svst1_mf8(svbool_t pg, mfloat8_t *base, svmfloat8_t data) MODE_ATTR
// CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]]
// CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr align 1 [[TMP2]], <vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z18test_svst1_vnum_s8u10__SVBool_tPalu10__SVInt8_t(
@@ -235,7 +235,7 @@ void test_svst1_mf8(svbool_t pg, mfloat8_t *base, svmfloat8_t data) MODE_ATTR
// CPP-CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]]
// CPP-CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr align 1 [[TMP2]], <vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_vnum_s8(svbool_t pg, int8_t *base, int64_t vnum, svint8_t data) MODE_ATTR
@@ -250,7 +250,7 @@ void test_svst1_vnum_s8(svbool_t pg, int8_t *base, int64_t vnum, svint8_t data)
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z19test_svst1_vnum_s16u10__SVBool_tPslu11__SVInt16_t(
@@ -260,7 +260,7 @@ void test_svst1_vnum_s8(svbool_t pg, int8_t *base, int64_t vnum, svint8_t data)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_vnum_s16(svbool_t pg, int16_t *base, int64_t vnum, svint16_t data) MODE_ATTR
@@ -275,7 +275,7 @@ void test_svst1_vnum_s16(svbool_t pg, int16_t *base, int64_t vnum, svint16_t dat
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z19test_svst1_vnum_s32u10__SVBool_tPilu11__SVInt32_t(
@@ -285,7 +285,7 @@ void test_svst1_vnum_s16(svbool_t pg, int16_t *base, int64_t vnum, svint16_t dat
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_vnum_s32(svbool_t pg, int32_t *base, int64_t vnum, svint32_t data) MODE_ATTR
@@ -300,7 +300,7 @@ void test_svst1_vnum_s32(svbool_t pg, int32_t *base, int64_t vnum, svint32_t dat
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z19test_svst1_vnum_s64u10__SVBool_tPllu11__SVInt64_t(
@@ -310,7 +310,7 @@ void test_svst1_vnum_s32(svbool_t pg, int32_t *base, int64_t vnum, svint32_t dat
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_vnum_s64(svbool_t pg, int64_t *base, int64_t vnum, svint64_t data) MODE_ATTR
@@ -324,7 +324,7 @@ void test_svst1_vnum_s64(svbool_t pg, int64_t *base, int64_t vnum, svint64_t dat
// CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]]
// CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr align 1 [[TMP2]], <vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z18test_svst1_vnum_u8u10__SVBool_tPhlu11__SVUint8_t(
@@ -333,7 +333,7 @@ void test_svst1_vnum_s64(svbool_t pg, int64_t *base, int64_t vnum, svint64_t dat
// CPP-CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]]
// CPP-CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr align 1 [[TMP2]], <vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_vnum_u8(svbool_t pg, uint8_t *base, int64_t vnum, svuint8_t data) MODE_ATTR
@@ -348,7 +348,7 @@ void test_svst1_vnum_u8(svbool_t pg, uint8_t *base, int64_t vnum, svuint8_t data
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z19test_svst1_vnum_u16u10__SVBool_tPtlu12__SVUint16_t(
@@ -358,7 +358,7 @@ void test_svst1_vnum_u8(svbool_t pg, uint8_t *base, int64_t vnum, svuint8_t data
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_vnum_u16(svbool_t pg, uint16_t *base, int64_t vnum, svuint16_t data) MODE_ATTR
@@ -373,7 +373,7 @@ void test_svst1_vnum_u16(svbool_t pg, uint16_t *base, int64_t vnum, svuint16_t d
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z19test_svst1_vnum_u32u10__SVBool_tPjlu12__SVUint32_t(
@@ -383,7 +383,7 @@ void test_svst1_vnum_u16(svbool_t pg, uint16_t *base, int64_t vnum, svuint16_t d
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_vnum_u32(svbool_t pg, uint32_t *base, int64_t vnum, svuint32_t data) MODE_ATTR
@@ -398,7 +398,7 @@ void test_svst1_vnum_u32(svbool_t pg, uint32_t *base, int64_t vnum, svuint32_t d
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z19test_svst1_vnum_u64u10__SVBool_tPmlu12__SVUint64_t(
@@ -408,7 +408,7 @@ void test_svst1_vnum_u32(svbool_t pg, uint32_t *base, int64_t vnum, svuint32_t d
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_vnum_u64(svbool_t pg, uint64_t *base, int64_t vnum, svuint64_t data) MODE_ATTR
@@ -423,7 +423,7 @@ void test_svst1_vnum_u64(svbool_t pg, uint64_t *base, int64_t vnum, svuint64_t d
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv8f16.p0(<vscale x 8 x half> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv8f16.p0(<vscale x 8 x half> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z19test_svst1_vnum_f16u10__SVBool_tPDhlu13__SVFloat16_t(
@@ -433,7 +433,7 @@ void test_svst1_vnum_u64(svbool_t pg, uint64_t *base, int64_t vnum, svuint64_t d
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv8f16.p0(<vscale x 8 x half> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv8f16.p0(<vscale x 8 x half> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_vnum_f16(svbool_t pg, float16_t *base, int64_t vnum, svfloat16_t data) MODE_ATTR
@@ -448,7 +448,7 @@ void test_svst1_vnum_f16(svbool_t pg, float16_t *base, int64_t vnum, svfloat16_t
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z19test_svst1_vnum_f32u10__SVBool_tPflu13__SVFloat32_t(
@@ -458,7 +458,7 @@ void test_svst1_vnum_f16(svbool_t pg, float16_t *base, int64_t vnum, svfloat16_t
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_vnum_f32(svbool_t pg, float32_t *base, int64_t vnum, svfloat32_t data) MODE_ATTR
@@ -473,7 +473,7 @@ void test_svst1_vnum_f32(svbool_t pg, float32_t *base, int64_t vnum, svfloat32_t
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z19test_svst1_vnum_f64u10__SVBool_tPdlu13__SVFloat64_t(
@@ -483,7 +483,7 @@ void test_svst1_vnum_f32(svbool_t pg, float32_t *base, int64_t vnum, svfloat32_t
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_vnum_f64(svbool_t pg, float64_t *base, int64_t vnum, svfloat64_t data) MODE_ATTR
@@ -497,7 +497,7 @@ void test_svst1_vnum_f64(svbool_t pg, float64_t *base, int64_t vnum, svfloat64_t
// CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]]
// CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr align 1 [[TMP2]], <vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z19test_svst1_vnum_mf8u10__SVBool_tPu6__mfp8lu13__SVMfloat8_t(
@@ -506,7 +506,7 @@ void test_svst1_vnum_f64(svbool_t pg, float64_t *base, int64_t vnum, svfloat64_t
// CPP-CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]]
// CPP-CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr align 1 [[TMP2]], <vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_vnum_mf8(svbool_t pg, mfloat8_t *base, int64_t vnum, svmfloat8_t data) MODE_ATTR
@@ -1247,13 +1247,13 @@ void test_svst1_scatter_u64base_index_f64(svbool_t pg, svuint64_t bases, int64_t
// CHECK-LABEL: @test_svst1_bf16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv8bf16.p0(<vscale x 8 x bfloat> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv8bf16.p0(<vscale x 8 x bfloat> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z15test_svst1_bf16u10__SVBool_tPu6__bf16u14__SVBfloat16_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv8bf16.p0(<vscale x 8 x bfloat> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv8bf16.p0(<vscale x 8 x bfloat> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_bf16(svbool_t pg, bfloat16_t *base, svbfloat16_t data) MODE_ATTR
@@ -1268,7 +1268,7 @@ void test_svst1_bf16(svbool_t pg, bfloat16_t *base, svbfloat16_t data) MODE_ATTR
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv8bf16.p0(<vscale x 8 x bfloat> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv8bf16.p0(<vscale x 8 x bfloat> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z20test_svst1_vnum_bf16u10__SVBool_tPu6__bf16lu14__SVBfloat16_t(
@@ -1278,7 +1278,7 @@ void test_svst1_bf16(svbool_t pg, bfloat16_t *base, svbfloat16_t data) MODE_ATTR
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv8bf16.p0(<vscale x 8 x bfloat> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv8bf16.p0(<vscale x 8 x bfloat> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_vnum_bf16(svbool_t pg, bfloat16_t *base, int64_t vnum, svbfloat16_t data) MODE_ATTR
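The truncating stores in the st1b/st1h/st1w files below follow the same pattern with one extra step: the data vector is first narrowed with trunc, and the narrowed value is what the masked store writes through the align-attributed pointer. A sketch of the new form, with illustrative operand names:

; st1h-style narrowing store: truncate, then masked store via an aligned pointer
%t = trunc <vscale x 4 x i32> %data to <vscale x 4 x i16>
tail call void @llvm.masked.store.nxv4i16.p0(<vscale x 4 x i16> %t, ptr align 1 %base, <vscale x 4 x i1> %pg)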
diff --git a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1b.c b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1b.c
index c908bc2..3ac49e2 100644
--- a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1b.c
+++ b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1b.c
@@ -24,7 +24,7 @@
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = trunc <vscale x 8 x i16> [[DATA:%.*]] to <vscale x 8 x i8>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP1]], ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP1]], ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1b_s16(svbool_t pg, int8_t *base, svint16_t data) MODE_ATTR
@@ -36,7 +36,7 @@ void test_svst1b_s16(svbool_t pg, int8_t *base, svint16_t data) MODE_ATTR
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i8>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i8.p0(<vscale x 4 x i8> [[TMP1]], ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i8.p0(<vscale x 4 x i8> [[TMP1]], ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1b_s32(svbool_t pg, int8_t *base, svint32_t data) MODE_ATTR
@@ -48,7 +48,7 @@ void test_svst1b_s32(svbool_t pg, int8_t *base, svint32_t data) MODE_ATTR
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i8>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i8.p0(<vscale x 2 x i8> [[TMP1]], ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i8.p0(<vscale x 2 x i8> [[TMP1]], ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1b_s64(svbool_t pg, int8_t *base, svint64_t data) MODE_ATTR
@@ -60,7 +60,7 @@ void test_svst1b_s64(svbool_t pg, int8_t *base, svint64_t data) MODE_ATTR
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = trunc <vscale x 8 x i16> [[DATA:%.*]] to <vscale x 8 x i8>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP1]], ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP1]], ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1b_u16(svbool_t pg, uint8_t *base, svuint16_t data) MODE_ATTR
@@ -72,7 +72,7 @@ void test_svst1b_u16(svbool_t pg, uint8_t *base, svuint16_t data) MODE_ATTR
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i8>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i8.p0(<vscale x 4 x i8> [[TMP1]], ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i8.p0(<vscale x 4 x i8> [[TMP1]], ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1b_u32(svbool_t pg, uint8_t *base, svuint32_t data) MODE_ATTR
@@ -84,7 +84,7 @@ void test_svst1b_u32(svbool_t pg, uint8_t *base, svuint32_t data) MODE_ATTR
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i8>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i8.p0(<vscale x 2 x i8> [[TMP1]], ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i8.p0(<vscale x 2 x i8> [[TMP1]], ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1b_u64(svbool_t pg, uint8_t *base, svuint64_t data) MODE_ATTR
@@ -100,7 +100,7 @@ void test_svst1b_u64(svbool_t pg, uint8_t *base, svuint64_t data) MODE_ATTR
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
// CHECK-NEXT: [[TMP4:%.*]] = trunc <vscale x 8 x i16> [[DATA:%.*]] to <vscale x 8 x i8>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP4]], ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP4]], ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1b_vnum_s16(svbool_t pg, int8_t *base, int64_t vnum, svint16_t data) MODE_ATTR
@@ -116,7 +116,7 @@ void test_svst1b_vnum_s16(svbool_t pg, int8_t *base, int64_t vnum, svint16_t dat
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
// CHECK-NEXT: [[TMP4:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i8>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i8.p0(<vscale x 4 x i8> [[TMP4]], ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i8.p0(<vscale x 4 x i8> [[TMP4]], ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1b_vnum_s32(svbool_t pg, int8_t *base, int64_t vnum, svint32_t data) MODE_ATTR
@@ -132,7 +132,7 @@ void test_svst1b_vnum_s32(svbool_t pg, int8_t *base, int64_t vnum, svint32_t dat
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
// CHECK-NEXT: [[TMP4:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i8>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i8.p0(<vscale x 2 x i8> [[TMP4]], ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i8.p0(<vscale x 2 x i8> [[TMP4]], ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1b_vnum_s64(svbool_t pg, int8_t *base, int64_t vnum, svint64_t data) MODE_ATTR
@@ -148,7 +148,7 @@ void test_svst1b_vnum_s64(svbool_t pg, int8_t *base, int64_t vnum, svint64_t dat
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
// CHECK-NEXT: [[TMP4:%.*]] = trunc <vscale x 8 x i16> [[DATA:%.*]] to <vscale x 8 x i8>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP4]], ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP4]], ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1b_vnum_u16(svbool_t pg, uint8_t *base, int64_t vnum, svuint16_t data) MODE_ATTR
@@ -164,7 +164,7 @@ void test_svst1b_vnum_u16(svbool_t pg, uint8_t *base, int64_t vnum, svuint16_t d
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
// CHECK-NEXT: [[TMP4:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i8>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i8.p0(<vscale x 4 x i8> [[TMP4]], ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i8.p0(<vscale x 4 x i8> [[TMP4]], ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1b_vnum_u32(svbool_t pg, uint8_t *base, int64_t vnum, svuint32_t data) MODE_ATTR
@@ -180,7 +180,7 @@ void test_svst1b_vnum_u32(svbool_t pg, uint8_t *base, int64_t vnum, svuint32_t d
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
// CHECK-NEXT: [[TMP4:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i8>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i8.p0(<vscale x 2 x i8> [[TMP4]], ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i8.p0(<vscale x 2 x i8> [[TMP4]], ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1b_vnum_u64(svbool_t pg, uint8_t *base, int64_t vnum, svuint64_t data) MODE_ATTR
diff --git a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1h.c b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1h.c
index 959b658..1e3e0b2 100644
--- a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1h.c
+++ b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1h.c
@@ -24,7 +24,7 @@
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i16>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i16.p0(<vscale x 4 x i16> [[TMP1]], ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i16.p0(<vscale x 4 x i16> [[TMP1]], ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1h_s32(svbool_t pg, int16_t *base, svint32_t data) MODE_ATTR
@@ -36,7 +36,7 @@ void test_svst1h_s32(svbool_t pg, int16_t *base, svint32_t data) MODE_ATTR
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i16>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i16.p0(<vscale x 2 x i16> [[TMP1]], ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i16.p0(<vscale x 2 x i16> [[TMP1]], ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1h_s64(svbool_t pg, int16_t *base, svint64_t data) MODE_ATTR
@@ -48,7 +48,7 @@ void test_svst1h_s64(svbool_t pg, int16_t *base, svint64_t data) MODE_ATTR
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i16>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i16.p0(<vscale x 4 x i16> [[TMP1]], ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i16.p0(<vscale x 4 x i16> [[TMP1]], ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1h_u32(svbool_t pg, uint16_t *base, svuint32_t data) MODE_ATTR
@@ -60,7 +60,7 @@ void test_svst1h_u32(svbool_t pg, uint16_t *base, svuint32_t data) MODE_ATTR
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i16>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i16.p0(<vscale x 2 x i16> [[TMP1]], ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i16.p0(<vscale x 2 x i16> [[TMP1]], ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1h_u64(svbool_t pg, uint16_t *base, svuint64_t data) MODE_ATTR
@@ -76,7 +76,7 @@ void test_svst1h_u64(svbool_t pg, uint16_t *base, svuint64_t data) MODE_ATTR
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
// CHECK-NEXT: [[TMP4:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i16>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i16.p0(<vscale x 4 x i16> [[TMP4]], ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i16.p0(<vscale x 4 x i16> [[TMP4]], ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1h_vnum_s32(svbool_t pg, int16_t *base, int64_t vnum, svint32_t data) MODE_ATTR
@@ -92,7 +92,7 @@ void test_svst1h_vnum_s32(svbool_t pg, int16_t *base, int64_t vnum, svint32_t da
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
// CHECK-NEXT: [[TMP4:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i16>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i16.p0(<vscale x 2 x i16> [[TMP4]], ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i16.p0(<vscale x 2 x i16> [[TMP4]], ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1h_vnum_s64(svbool_t pg, int16_t *base, int64_t vnum, svint64_t data) MODE_ATTR
@@ -108,7 +108,7 @@ void test_svst1h_vnum_s64(svbool_t pg, int16_t *base, int64_t vnum, svint64_t da
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
// CHECK-NEXT: [[TMP4:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i16>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i16.p0(<vscale x 4 x i16> [[TMP4]], ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i16.p0(<vscale x 4 x i16> [[TMP4]], ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1h_vnum_u32(svbool_t pg, uint16_t *base, int64_t vnum, svuint32_t data) MODE_ATTR
@@ -124,7 +124,7 @@ void test_svst1h_vnum_u32(svbool_t pg, uint16_t *base, int64_t vnum, svuint32_t
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
// CHECK-NEXT: [[TMP4:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i16>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i16.p0(<vscale x 2 x i16> [[TMP4]], ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i16.p0(<vscale x 2 x i16> [[TMP4]], ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1h_vnum_u64(svbool_t pg, uint16_t *base, int64_t vnum, svuint64_t data) MODE_ATTR
diff --git a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1w.c b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1w.c
index 3d9e45bd..1a12412 100644
--- a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1w.c
+++ b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1w.c
@@ -24,7 +24,7 @@
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i32>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i32.p0(<vscale x 2 x i32> [[TMP1]], ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i32.p0(<vscale x 2 x i32> [[TMP1]], ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1w_s64(svbool_t pg, int32_t *base, svint64_t data) MODE_ATTR
@@ -36,7 +36,7 @@ void test_svst1w_s64(svbool_t pg, int32_t *base, svint64_t data) MODE_ATTR
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i32>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i32.p0(<vscale x 2 x i32> [[TMP1]], ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i32.p0(<vscale x 2 x i32> [[TMP1]], ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1w_u64(svbool_t pg, uint32_t *base, svuint64_t data) MODE_ATTR
@@ -52,7 +52,7 @@ void test_svst1w_u64(svbool_t pg, uint32_t *base, svuint64_t data) MODE_ATTR
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
// CHECK-NEXT: [[TMP4:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i32>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i32.p0(<vscale x 2 x i32> [[TMP4]], ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i32.p0(<vscale x 2 x i32> [[TMP4]], ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1w_vnum_s64(svbool_t pg, int32_t *base, int64_t vnum, svint64_t data) MODE_ATTR
@@ -68,7 +68,7 @@ void test_svst1w_vnum_s64(svbool_t pg, int32_t *base, int64_t vnum, svint64_t da
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
// CHECK-NEXT: [[TMP4:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i32>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i32.p0(<vscale x 2 x i32> [[TMP4]], ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i32.p0(<vscale x 2 x i32> [[TMP4]], ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1w_vnum_u64(svbool_t pg, uint32_t *base, int64_t vnum, svuint64_t data) MODE_ATTR
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfadd.c
new file mode 100644
index 0000000..d7734e0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfadd.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfclass.c
new file mode 100644
index 0000000..68814f4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfclass.c
@@ -0,0 +1,134 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1bf16.i64(<vscale x 1 x i16> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf4_u16mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2bf16.i64(<vscale x 2 x i16> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf2_u16mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4bf16.i64(<vscale x 4 x i16> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m1_u16m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8bf16.i64(<vscale x 8 x i16> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m2_u16m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16bf16.i64(<vscale x 16 x i16> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m4_u16m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32bf16.i64(<vscale x 32 x i16> poison, <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m8_u16m8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1bf16.i64(<vscale x 1 x i16> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16mf4_u16mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2bf16.i64(<vscale x 2 x i16> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16mf2_u16mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4bf16.i64(<vscale x 4 x i16> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16m1_u16m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8bf16.i64(<vscale x 8 x i16> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16m2_u16m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16bf16.i64(<vscale x 16 x i16> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16m4_u16m4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32bf16.i64(<vscale x 32 x i16> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16m8_u16m8_m(vm, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmacc.c
new file mode 100644
index 0000000..616455d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmacc.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m8_m(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmadd.c
new file mode 100644
index 0000000..eec662a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmadd.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m8(vd, rs1, vs2, vl);
+}
+
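+// The masked variants below keep vd as the accumulator operand and append
+// the mask ahead of the trailing scalars; as in the unmasked forms, the
+// i64 7 operand appears to select the dynamic rounding mode (frm) and the
+// final i64 3 the tail/mask-agnostic policy.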
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m8_m(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmax.c
new file mode 100644
index 0000000..dfdeb4e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmax.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
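+// vfmax is not affected by the rounding mode, so the checks below carry no
+// frm operand: the unmasked form lowers to @llvm.riscv.vfmax.* with a poison
+// passthru and the VL, and the masked form appends the mask plus what appears
+// to be a tail/mask-agnostic policy operand (i64 3).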
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmerge.c
new file mode 100644
index 0000000..96221c5
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmerge.c
@@ -0,0 +1,69 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
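+// vfmerge takes the mask as a data operand rather than a control mask: each
+// result lane is the scalar op2 where the mask bit is set and op1 otherwise,
+// so the intrinsic carries no policy operand and uses a poison passthru.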
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmerge_vfm_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmerge.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmerge_vfm_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, vbool64_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16mf4(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmerge_vfm_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmerge.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmerge_vfm_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, vbool32_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16mf2(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmerge_vfm_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmerge.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmerge_vfm_bf16m1(vbfloat16m1_t op1, __bf16 op2, vbool16_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16m1(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmerge_vfm_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmerge.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmerge_vfm_bf16m2(vbfloat16m2_t op1, __bf16 op2, vbool8_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16m2(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmerge_vfm_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmerge.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmerge_vfm_bf16m4(vbfloat16m4_t op1, __bf16 op2, vbool4_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16m4(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmerge_vfm_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmerge.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmerge_vfm_bf16m8(vbfloat16m8_t op1, __bf16 op2, vbool2_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16m8(op1, op2, mask, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmin.c
new file mode 100644
index 0000000..8f8d82ba
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmin.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsac.c
new file mode 100644
index 0000000..f4644df
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsac.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
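+// Like vfmadd, vfmsac is rounding-mode-aware: the i64 7 operand appears to
+// request the dynamic rounding mode from frm, and the trailing i64 3 the
+// tail/mask-agnostic policy; vd is passed through as the accumulator rather
+// than poison.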
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m8_m(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsub.c
new file mode 100644
index 0000000..07053afa
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsub.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m8_m(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmul.c
new file mode 100644
index 0000000..88fb329
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmul.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmv.c
new file mode 100644
index 0000000..d80ec3d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmv.c
@@ -0,0 +1,189 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmv_v_f_bf16mf4(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.v.f.nxv1bf16.i64(<vscale x 1 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmv_v_f_bf16mf4(__bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmv_v_f_bf16mf2(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmv.v.f.nxv2bf16.i64(<vscale x 2 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmv_v_f_bf16mf2(__bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmv_v_f_bf16m1(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmv.v.f.nxv4bf16.i64(<vscale x 4 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmv_v_f_bf16m1(__bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmv_v_f_bf16m2(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmv.v.f.nxv8bf16.i64(<vscale x 8 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmv_v_f_bf16m2(__bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmv_v_f_bf16m4(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmv.v.f.nxv16bf16.i64(<vscale x 16 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmv_v_f_bf16m4(__bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmv_v_f_bf16m8(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmv.v.f.nxv32bf16.i64(<vscale x 32 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmv_v_f_bf16m8(__bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16m8(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16mf4_bf16(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv1bf16(<vscale x 1 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16mf4_bf16(vbfloat16mf4_t src) {
+ return __riscv_vfmv_f_s_bf16mf4_bf16(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmv_s_f_bf16mf4(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.s.f.nxv1bf16.i64(<vscale x 1 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmv_s_f_bf16mf4(__bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16mf2_bf16(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv2bf16(<vscale x 2 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16mf2_bf16(vbfloat16mf2_t src) {
+ return __riscv_vfmv_f_s_bf16mf2_bf16(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmv_s_f_bf16mf2(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmv.s.f.nxv2bf16.i64(<vscale x 2 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmv_s_f_bf16mf2(__bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m1_bf16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv4bf16(<vscale x 4 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16m1_bf16(vbfloat16m1_t src) {
+ return __riscv_vfmv_f_s_bf16m1_bf16(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmv_s_f_bf16m1(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmv.s.f.nxv4bf16.i64(<vscale x 4 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmv_s_f_bf16m1(__bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m2_bf16(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv8bf16(<vscale x 8 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16m2_bf16(vbfloat16m2_t src) {
+ return __riscv_vfmv_f_s_bf16m2_bf16(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmv_s_f_bf16m2(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmv.s.f.nxv8bf16.i64(<vscale x 8 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmv_s_f_bf16m2(__bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m4_bf16(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv16bf16(<vscale x 16 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16m4_bf16(vbfloat16m4_t src) {
+ return __riscv_vfmv_f_s_bf16m4_bf16(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmv_s_f_bf16m4(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmv.s.f.nxv16bf16.i64(<vscale x 16 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmv_s_f_bf16m4(__bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m8_bf16(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv32bf16(<vscale x 32 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16m8_bf16(vbfloat16m8_t src) {
+ return __riscv_vfmv_f_s_bf16m8_bf16(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmv_s_f_bf16m8(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmv.s.f.nxv32bf16.i64(<vscale x 32 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmv_s_f_bf16m8(__bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16m8(src, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt.c
new file mode 100644
index 0000000..a5afab9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt.c
@@ -0,0 +1,724 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4(vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2(vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1(vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2(vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4(vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_m(vbool64_t vm, vfloat32mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_m(vbool32_t vm, vfloat32m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_m(vbool16_t vm, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_m(vbool8_t vm, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_m(vbool4_t vm, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm(vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm(vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm(vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm(vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm(vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_m(vbool32_t vm,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_m(vbool32_t vm,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_m(vbool16_t vm,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_m(vbool64_t vm, vfloat32mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rod.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rod.c
new file mode 100644
index 0000000..70c377b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rod.c
@@ -0,0 +1,113 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4(vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2(vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1(vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2(vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4(vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_m(vbool64_t vm, vfloat32mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_m(vbool32_t vm, vfloat32m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_m(vbool16_t vm, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_m(vbool8_t vm, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_m(vbool4_t vm, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m4_m(vm, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rtz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rtz.c
new file mode 100644
index 0000000..854e986
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rtz.c
@@ -0,0 +1,267 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf4_i8mf8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf2_i8mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m2_i8m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m4_i8m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m8_i8m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8(vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4(vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m1_u8mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m2_u8m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m4_u8m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m8_u8m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_m(vbool32_t vm,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m2_i8m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m4_i8m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m8_i8m4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_m(vbool32_t vm,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_m(vbool16_t vm,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m2_u8m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m4_u8m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m8_u8m4_m(vm, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmacc.c
new file mode 100644
index 0000000..1848488
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmacc.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m8_m(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmadd.c
new file mode 100644
index 0000000..e519e5a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmadd.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m8_m(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsac.c
new file mode 100644
index 0000000..47e1f44
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsac.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m8_m(mask, vd, rs1, vs2, vl);
+}
+
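The vfnmsac tests above cover every LMUL from mf4 to m8 in both vector-vector and vector-scalar forms, each lowering to a single @llvm.riscv.vfnmsac.* call that appends the dynamic rounding-mode operand (i64 7) and a policy operand (i64 3). As a usage illustration only, not part of the patch, here is a strip-mined loop built on the unmasked vv intrinsic exercised above; the helper name and the bf16 load/store and vsetvl intrinsics are assumptions about the surrounding API, not something this file tests.

#include <stddef.h>
#include <riscv_vector.h>

// Sketch: acc[i] = -(a[i] * b[i]) + acc[i] over n bf16 elements.
void fnmsac_bf16(__bf16 *acc, const __bf16 *a, const __bf16 *b, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e16m1(n - i);                  // assumed available
    vbfloat16m1_t vacc = __riscv_vle16_v_bf16m1(acc + i, vl); // assumed available
    vbfloat16m1_t va = __riscv_vle16_v_bf16m1(a + i, vl);
    vbfloat16m1_t vb = __riscv_vle16_v_bf16m1(b + i, vl);
    vacc = __riscv_vfnmsac_vv_bf16m1(vacc, va, vb, vl);       // tested above
    __riscv_vse16_v_bf16m1(acc + i, vacc, vl);
    i += vl;
  }
}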
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsub.c
new file mode 100644
index 0000000..4b55b64
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsub.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m8_m(mask, vd, rs1, vs2, vl);
+}
+
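vfnmsub computes the same polynomial as vfnmsac with the addend and multiplicand roles swapped: per the RVV spec, vfnmsac overwrites the addend (vd = -(vs1 * vs2) + vd) while vfnmsub overwrites a multiplicand (vd = -(vd * vs1) + vs2). A minimal sketch using the two intrinsics tested above; the helper and variable names are illustrative:

#include <stddef.h>
#include <riscv_vector.h>

// Same value two ways; the forms differ only in which input is overwritten.
vbfloat16m1_t fnms_both_ways(vbfloat16m1_t acc, vbfloat16m1_t x,
                             vbfloat16m1_t y, size_t vl) {
  vbfloat16m1_t r1 = __riscv_vfnmsac_vv_bf16m1(acc, x, y, vl); // -(x*y) + acc
  vbfloat16m1_t r2 = __riscv_vfnmsub_vv_bf16m1(x, y, acc, vl); // -(x*y) + acc
  (void)r2; // r1 and r2 are identical: same fused operation, same rounding
  return r1;
}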
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrec7.c
new file mode 100644
index 0000000..1ffee73
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrec7.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4(vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2(vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf2(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1(vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m1(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2(vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m2(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4(vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8(vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m8(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf4_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf2_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m1_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m2_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m4_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m8_m(mask, op1, vl);
+}
+
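vfrec7 returns a reciprocal estimate accurate to roughly 7 bits and, unlike vfrsqrt7 below, its IR keeps the frm operand (i64 7) because the estimate is rounded under the dynamic rounding mode. Such an estimate is conventionally sharpened with a Newton-Raphson step, r' = r * (2 - x * r); the sketch below combines vfrec7 and vfnmsac from this patch, but the bf16 vfmv.v.f splat and vfmul intrinsics it also uses are assumptions, not covered by this file:

#include <stddef.h>
#include <riscv_vector.h>

// One Newton-Raphson refinement of the 7-bit reciprocal estimate.
vbfloat16m1_t recip_refined(vbfloat16m1_t x, size_t vl) {
  vbfloat16m1_t r = __riscv_vfrec7_v_bf16m1(x, vl);              // ~2^-7 estimate
  vbfloat16m1_t two = __riscv_vfmv_v_f_bf16m1((__bf16)2.0f, vl); // assumed intrinsic
  vbfloat16m1_t t = __riscv_vfnmsac_vv_bf16m1(two, x, r, vl);    // 2 - x*r
  return __riscv_vfmul_vv_bf16m1(r, t, vl);                      // assumed intrinsic
}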
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsqrt7.c
new file mode 100644
index 0000000..964c486
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsqrt7.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4(vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2(vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf2(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1(vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m1(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2(vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m2(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4(vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8(vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m8(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf4_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf2_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m1_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m2_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m4_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m8_m(mask, op1, vl);
+}
+
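Note the contrast with vfrec7: the @llvm.riscv.vfrsqrt7.* calls above carry no rounding-mode operand, only vl (plus the mask and policy operands in the masked form), since the reciprocal square-root estimate is defined without reference to frm. A minimal masked call, mirroring the m1 test above (helper name illustrative):

#include <stddef.h>
#include <riscv_vector.h>

// ~7-bit 1/sqrt(x) estimate; inactive lanes follow the mask-agnostic policy.
vbfloat16m1_t inv_sqrt_est(vbool16_t mask, vbfloat16m1_t x, size_t vl) {
  return __riscv_vfrsqrt7_v_bf16m1_m(mask, x, vl);
}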
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsub.c
new file mode 100644
index 0000000..c7c3869e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsub.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnj.c
new file mode 100644
index 0000000..778b8b83
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnj.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
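+// The tests below cover __riscv_vfsgnj_{vv,vf}_bf16* for every bf16 LMUL
+// (mf4 through m8), plus the masked (_m) forms. vfsgnj produces |op1| with
+// the sign of op2; sign injection never rounds, so no frm operand appears,
+// and the trailing `i64 3` in the masked calls is the TA/MA policy operand.
+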
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjn.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjn.c
new file mode 100644
index 0000000..7de3089
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjn.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
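+// Same coverage pattern as vfsgnj.c, but vfsgnjn injects the negated sign
+// of op2 into |op1| (per the RVV spec, vfneg is the vs2 == vs1 special case
+// of this instruction).
+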
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjx.c
new file mode 100644
index 0000000..5fa285cc
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjx.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
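+// vfsgnjx sets the result sign to sign(op1) XOR sign(op2); per the RVV
+// spec, vfabs is the vs2 == vs1 special case. Otherwise the test layout
+// matches vfsgnj.c.
+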
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1down.c
new file mode 100644
index 0000000..b94d26b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1down.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4(vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf4(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2(vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf2(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1(vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m1(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2(vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m2(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4(vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m4(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8(vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m8(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf4_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf2_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m1_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m2_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m4_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m8_m(mask, src, value, vl);
+}
+
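For reference, vfslide1down.vf shifts every element one position down (result[i] = src[i+1]) and inserts the scalar value at the last active index, vl-1; being a data-movement op it carries no rounding-mode operand, only the i64 3 tail/mask policy on the masked forms. A minimal scalar model of the unmasked behaviour, assuming a plain array stands in for the vector register (illustrative only):

#include <stddef.h>

// Scalar model of vfslide1down.vf over the first vl elements.
// dst and src are assumed not to alias in this sketch.
static void slide1down_ref(__bf16 *dst, const __bf16 *src,
                           __bf16 value, size_t vl) {
  for (size_t i = 0; i + 1 < vl; ++i)
    dst[i] = src[i + 1];   // shift each element down by one
  if (vl > 0)
    dst[vl - 1] = value;   // scalar enters at the top active element
}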
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1up.c
new file mode 100644
index 0000000..06e8b49
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1up.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4(vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf4(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2(vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf2(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1(vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m1(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2(vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m2(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4(vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m4(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8(vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m8(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf4_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf2_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m1_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m2_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m4_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m8_m(mask, src, value, vl);
+}
+
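vfslide1up.vf is the mirror image: the scalar enters at element 0 and result[i] = src[i-1] for the remaining active elements. The same kind of sketch, under the same assumptions:

#include <stddef.h>

// Scalar model of vfslide1up.vf over the first vl elements.
static void slide1up_ref(__bf16 *dst, const __bf16 *src,
                         __bf16 value, size_t vl) {
  if (vl > 0)
    dst[0] = value;        // scalar enters at element 0
  for (size_t i = 1; i < vl; ++i)
    dst[i] = src[i - 1];   // everything else shifts up by one
}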
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsub.c
new file mode 100644
index 0000000..2423b0b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsub.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
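Unlike the slide and sign-injection ops, vfsub is rounding arithmetic, so the lowered call carries an extra i64 7 operand ahead of vl: 7 is the FRM encoding for DYN, i.e. round according to the frm CSR at execution time; the masked forms then carry both that operand and the i64 3 policy. The widening vfwadd tests that follow are the same pattern with f32 results, plus .wv/.wf forms whose vs2 input is already f32. A rough scalar model of the unmasked vfsub.vv, computed in float and glossing over the final narrowing of each lane back to bf16 (illustrative only):

#include <stddef.h>

// Scalar model of vfsub.vv over the first vl elements. The real op
// rounds each bf16 result per the dynamic rounding mode (FRM_DYN);
// this sketch stays in float and ignores that narrowing step.
static void vfsub_ref(float *dst, const float *op1,
                      const float *op2, size_t vl) {
  for (size_t i = 0; i < vl; ++i)
    dst[i] = op1[i] - op2[i];
}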
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwadd.c
new file mode 100644
index 0000000..24d34f4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwadd.c
@@ -0,0 +1,899 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2(vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2(vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1(vfloat32m1_t vs2, vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1(vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2(vbfloat16m1_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2(vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2(vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4(vbfloat16m2_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4(vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4(vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8(vbfloat16m4_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8(vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8(vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm(vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm(vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm(vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm(vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm(vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm(vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm(vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm(vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm(vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm(vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm(vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm(vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm(vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwcvt.c
new file mode 100644
index 0000000..fb3e003
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwcvt.c
@@ -0,0 +1,366 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4(vint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2(vint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1(vint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2(vint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4(vint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8(vint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4(vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2(vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1(vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2(vuint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4(vuint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8(vuint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf4_f32mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf2_f32m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m1_f32m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m2_f32m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m4_f32m8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_m(vbool64_t vm, vint8mf8_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_m(vbool32_t vm, vint8mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_m(vbool16_t vm, vint8mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_m(vbool8_t vm, vint8m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_m(vbool4_t vm, vint8m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_m(vbool2_t vm, vint8m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m8_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_m(vbool64_t vm, vuint8mf8_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_m(vbool32_t vm, vuint8mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_m(vbool16_t vm, vuint8mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_m(vbool8_t vm, vuint8m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_m(vbool4_t vm, vuint8m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_m(vbool2_t vm, vuint8m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m8_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf4_f32mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf2_f32m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m1_f32m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m2_f32m4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m4_f32m8_m(vm, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmacc.c
new file mode 100644
index 0000000..be09003
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmacc.c
@@ -0,0 +1,486 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmsac.c
new file mode 100644
index 0000000..7490813
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmsac.c
@@ -0,0 +1,486 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
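
A usage sketch before the next file (illustrative, not part of the generated test): the _rm variants above take an explicit rounding-mode argument instead of reading the dynamic frm CSR, which the checked IR reflects as the constant i64 0 (__RISCV_FRM_RNE) where the non-_rm forms pass i64 7 (dynamic). Assuming a zvfbfa-enabled toolchain, a caller might pin the rounding mode like this; the helper name is hypothetical:

#include <riscv_vector.h>

// vd[i] = (x[i] * y[i]) - acc[i], widened from bf16 to f32,
// with rounding forced to nearest-even rather than the frm CSR.
vfloat32m1_t msac_rne(vfloat32m1_t acc, vbfloat16mf2_t x, vbfloat16mf2_t y,
                      size_t vl) {
  return __riscv_vfwmsac_vv_bf16mf2_f32m1_rm(acc, x, y, __RISCV_FRM_RNE, vl);
}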
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmul.c
new file mode 100644
index 0000000..6783ba4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmul.c
@@ -0,0 +1,455 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2(vbfloat16m1_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4(vbfloat16m2_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8(vbfloat16m4_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm(vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm(vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm(vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
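
For context, a minimal strip-mined caller of the widening multiply tested above (illustrative only: the function name and loop are not part of the test, the bf16 load intrinsic is assumed to be available alongside the zvfbfa intrinsics, and a capable target such as -march=rv64gcv_zvfbfa is assumed):

#include <riscv_vector.h>
#include <stddef.h>

// out[i] = (float)a[i] * (float)b[i]; vfwmul widens both bf16 operands
// and multiplies in f32, so each element is rounded only once.
void bf16_widening_mul(const __bf16 *a, const __bf16 *b, float *out,
                       size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e16m1(n - i);  // elements handled this pass
    vbfloat16m1_t va = __riscv_vle16_v_bf16m1(a + i, vl);
    vbfloat16m1_t vb = __riscv_vle16_v_bf16m1(b + i, vl);
    vfloat32m2_t vw = __riscv_vfwmul_vv_bf16m1_f32m2(va, vb, vl);
    __riscv_vse32_v_f32m2(out + i, vw, vl);
    i += vl;
  }
}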
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmacc.c
new file mode 100644
index 0000000..6127a94
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmacc.c
@@ -0,0 +1,494 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmsac.c
new file mode 100644
index 0000000..f37dd31
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmsac.c
@@ -0,0 +1,494 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
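+// Each test below wraps a single __riscv_vfwnmsac_* call so the autogenerated
+// CHECK lines can pin down the llvm.riscv.vfwnmsac[.mask] intrinsic it lowers
+// to. Reading the trailing i64 operands (per the usual RVV intrinsic lowering
+// convention, an assumption not spelled out in this file): the frm operand is
+// 7 (dynamic rounding) for the plain forms and 0 (round-to-nearest-even) for
+// the _rm forms called with __RISCV_FRM_RNE, and the final policy operand 3
+// encodes tail-agnostic, mask-agnostic.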
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwsub.c
new file mode 100644
index 0000000..510ff91
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwsub.c
@@ -0,0 +1,899 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
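+// vfwsub is exercised in four forms per LMUL: _vv (widen both bf16 vector
+// operands), _vf (widen a bf16 vector and a bf16 scalar), and _wv/_wf (an f32
+// wide operand minus a widened bf16 vector or scalar). The unmasked lowerings
+// below pass a poison passthru and, per the usual RVV convention (an
+// assumption, as above), frm = 7 for dynamic rounding ahead of the vl operand.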
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2(vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2(vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1(vfloat32m1_t vs2, vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1(vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2(vbfloat16m1_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2(vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2(vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4(vbfloat16m2_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4(vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4(vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8(vbfloat16m4_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8(vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8(vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm(vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm(vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm(vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm(vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm(vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm(vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm(vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm(vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm(vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm(vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm(vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm(vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm(vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfeq.c
new file mode 100644
index 0000000..669d042
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfeq.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfge.c
new file mode 100644
index 0000000..b169efd
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfge.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfgt.c
new file mode 100644
index 0000000..9aea7d2
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfgt.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfle.c
new file mode 100644
index 0000000..40f0c27
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfle.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmflt.c
new file mode 100644
index 0000000..f64eee3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmflt.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
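+
+// Descriptive note (not autogenerated): these tests cover the bf16 vmflt
+// (less-than) comparison intrinsics at every LMUL from mf4 to m8, in both
+// vector-vector (vv) and vector-scalar (vf) forms, unmasked and masked (_m).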
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfne.c
new file mode 100644
index 0000000..809ea56
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfne.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
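+
+// Descriptive note (not autogenerated): these tests cover the bf16 vmfne
+// (not-equal) comparison intrinsics at every LMUL from mf4 to m8, in vv and
+// vf forms, unmasked and masked (_m).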
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfadd.c
new file mode 100644
index 0000000..9d6b071c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfadd.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
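+
+// Descriptive note (not autogenerated): these tests exercise the overloaded
+// (type-inferred) __riscv_vfadd on bf16 vectors at every LMUL. As the CHECK
+// lines show, the lowered IR carries a rounding-mode immediate (i64 7) on
+// every call and an additional tail/mask policy immediate (i64 3) on the
+// masked ones.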
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfclass.c
new file mode 100644
index 0000000..2760f85
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfclass.c
@@ -0,0 +1,134 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
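+// These tests cover the overloaded (type-inferred) __riscv_vfclass forms for
+// every bfloat16 LMUL (mf4 through m8), plus the masked _m overloads. The
+// -disable-O0-optnone / mem2reg pipeline in the RUN lines leaves each call as
+// a single IR intrinsic: unmasked forms take a poison passthru, and masked
+// forms append a policy operand of 3 (tail-agnostic, mask-agnostic).
+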
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1bf16.i64(<vscale x 1 x i16> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2bf16.i64(<vscale x 2 x i16> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4bf16.i64(<vscale x 4 x i16> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfclass(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8bf16.i64(<vscale x 8 x i16> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfclass(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16bf16.i64(<vscale x 16 x i16> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfclass(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32bf16.i64(<vscale x 32 x i16> poison, <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfclass(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1bf16.i64(<vscale x 1 x i16> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfclass(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2bf16.i64(<vscale x 2 x i16> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfclass(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4bf16.i64(<vscale x 4 x i16> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfclass(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8bf16.i64(<vscale x 8 x i16> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfclass(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16bf16.i64(<vscale x 16 x i16> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfclass(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32bf16.i64(<vscale x 32 x i16> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfclass(vm, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmacc.c
new file mode 100644
index 0000000..ae3f1f2
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmacc.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
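+// Overloaded __riscv_vfmacc: vd is the accumulator (vd = vs1 * vs2 + vd per
+// the RVV spec), with vv and vf (scalar __bf16) forms at each LMUL. Since vd
+// doubles as the merge operand, both unmasked and masked lowerings carry
+// frm=7 (dynamic rounding mode) and policy=3 (tail-agnostic, mask-agnostic).
+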
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmadd.c
new file mode 100644
index 0000000..db2184c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmadd.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
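+// Overloaded __riscv_vfmadd: unlike vfmacc, vd is a multiplicand and vs2 the
+// addend (vd = vd * vs1 + vs2 per the RVV spec); the lowering is otherwise
+// identical, carrying frm=7 (dynamic rounding) and policy=3 on every form.
+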
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmax.c
new file mode 100644
index 0000000..66497bf
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmax.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
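+// Overloaded __riscv_vfmax: min/max comparisons are exact, so no frm operand
+// appears in the lowering; unmasked forms take a poison passthru and masked
+// _m forms append policy=3 (tail-agnostic, mask-agnostic).
+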
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmerge.c
new file mode 100644
index 0000000..1dc290b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmerge.c
@@ -0,0 +1,69 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmerge_vfm_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmerge.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmerge_vfm_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, vbool64_t mask, size_t vl) {
+ return __riscv_vfmerge(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmerge_vfm_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmerge.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmerge_vfm_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, vbool32_t mask, size_t vl) {
+ return __riscv_vfmerge(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmerge_vfm_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmerge.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmerge_vfm_bf16m1(vbfloat16m1_t op1, __bf16 op2, vbool16_t mask, size_t vl) {
+ return __riscv_vfmerge(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmerge_vfm_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmerge.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmerge_vfm_bf16m2(vbfloat16m2_t op1, __bf16 op2, vbool8_t mask, size_t vl) {
+ return __riscv_vfmerge(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmerge_vfm_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmerge.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmerge_vfm_bf16m4(vbfloat16m4_t op1, __bf16 op2, vbool4_t mask, size_t vl) {
+ return __riscv_vfmerge(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmerge_vfm_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmerge.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmerge_vfm_bf16m8(vbfloat16m8_t op1, __bf16 op2, vbool2_t mask, size_t vl) {
+ return __riscv_vfmerge(op1, op2, mask, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmin.c
new file mode 100644
index 0000000..1564d11
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmin.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsac.c
new file mode 100644
index 0000000..0384e7d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsac.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsub.c
new file mode 100644
index 0000000..306f189
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsub.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
+}
+
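The overloaded `__riscv_vfmsub` calls exercised above lower to vfmsub.vv / vfmsub.vf, which compute vd[i] = vd[i] * vs1[i] - vs2[i], i.e. the destination register supplies one multiplicand rather than the addend. As a usage reference outside a lit test, here is a minimal strip-mined sketch; the helper name and buffers are illustrative only, and it assumes the standard bf16 unit-stride load/store and vsetvl intrinsics from the same header:

  #include <riscv_vector.h>
  #include <stddef.h>

  /* d[i] = d[i] * a[i] - b[i] over n bf16 elements. */
  void bf16_fmsub_inplace(__bf16 *d, const __bf16 *a, const __bf16 *b,
                          size_t n) {
    for (size_t i = 0; i < n;) {
      size_t vl = __riscv_vsetvl_e16m1(n - i);   /* lanes in this strip */
      vbfloat16m1_t vd = __riscv_vle16_v_bf16m1(d + i, vl);
      vbfloat16m1_t va = __riscv_vle16_v_bf16m1(a + i, vl);
      vbfloat16m1_t vb = __riscv_vle16_v_bf16m1(b + i, vl);
      vd = __riscv_vfmsub(vd, va, vb, vl);       /* vd = vd * va - vb */
      __riscv_vse16_v_bf16m1(d + i, vd, vl);
      i += vl;
    }
  }
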
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmul.c
new file mode 100644
index 0000000..fffd83a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmul.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
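In the masked `_m` overloads above, the mask comes first and the IR shows a poison merge operand with policy `i64 3` (tail agnostic, mask agnostic), so lanes where the mask is clear come back unspecified. A minimal sketch of a call site; the function name is illustrative and the mask is assumed to be produced elsewhere:

  #include <riscv_vector.h>

  /* Multiply only the lanes selected by m by the scalar s. Lanes where
     m is 0 are unspecified in the result and must not be consumed. */
  vbfloat16m1_t scale_selected(vbool16_t m, vbfloat16m1_t v, __bf16 s,
                               size_t vl) {
    return __riscv_vfmul(m, v, s, vl);
  }
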
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmv.c
new file mode 100644
index 0000000..f85378f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmv.c
@@ -0,0 +1,69 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16mf4_bf16(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[SRC:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv1bf16(<vscale x 1 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16mf4_bf16(vbfloat16mf4_t src) {
+ return __riscv_vfmv_f(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16mf2_bf16(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv2bf16(<vscale x 2 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16mf2_bf16(vbfloat16mf2_t src) {
+ return __riscv_vfmv_f(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m1_bf16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv4bf16(<vscale x 4 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16m1_bf16(vbfloat16m1_t src) {
+ return __riscv_vfmv_f(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m2_bf16(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv8bf16(<vscale x 8 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16m2_bf16(vbfloat16m2_t src) {
+ return __riscv_vfmv_f(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m4_bf16(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv16bf16(<vscale x 16 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16m4_bf16(vbfloat16m4_t src) {
+ return __riscv_vfmv_f(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m8_bf16(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv32bf16(<vscale x 32 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16m8_bf16(vbfloat16m8_t src) {
+ return __riscv_vfmv_f(src);
+}
+
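`__riscv_vfmv_f` (vfmv.f.s) moves element 0 of the source vector into a floating-point scalar, and as the tests above show it takes no vl operand since it reads a single element. A small sketch; the function name is illustrative, and the widening to float is an ordinary scalar conversion performed by the compiler, not part of the intrinsic:

  #include <riscv_vector.h>

  float first_lane_as_float(vbfloat16m1_t v) {
    __bf16 s = __riscv_vfmv_f(v);  /* s = v[0] */
    return (float)s;
  }
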
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt.c
new file mode 100644
index 0000000..fb635d6
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt.c
@@ -0,0 +1,724 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4(vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2(vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1(vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2(vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4(vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_m(vbool64_t vm, vfloat32mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_m(vbool32_t vm, vfloat32m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_m(vbool16_t vm, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_m(vbool8_t vm, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_m(vbool4_t vm, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
+}
+
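The `_rm` variants make the rounding mode an explicit argument: `__RISCV_FRM_RNE` appears as the literal `i64 0` in the frm operand slot of the IR, whereas the variants without `_rm` pass `i64 7` (dynamic, i.e. whatever the frm CSR holds at run time). A sketch contrasting the two forms at one call site; the helper name is illustrative:

  #include <riscv_vector.h>

  vint8mf8_t narrow_both_ways(vbfloat16mf4_t v, size_t vl) {
    vint8mf8_t dyn = __riscv_vfncvt_x(v, vl);                   /* frm = 7 */
    vint8mf8_t rne = __riscv_vfncvt_x(v, __RISCV_FRM_RNE, vl);  /* frm = 0 */
    return __riscv_vadd(dyn, rne, vl);  /* combine so both are used */
  }
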
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm(vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm(vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm(vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm(vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm(vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_m(vbool32_t vm,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_m(vbool32_t vm,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_m(vbool16_t vm,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_m(vbool64_t vm, vfloat32mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rod.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rod.c
new file mode 100644
index 0000000..1ad856d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rod.c
@@ -0,0 +1,113 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4(vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2(vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1(vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2(vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4(vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_m(vbool64_t vm, vfloat32mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_m(vbool32_t vm, vfloat32m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_m(vbool16_t vm, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_m(vbool8_t vm, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_m(vbool4_t vm, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vm, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rtz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rtz.c
new file mode 100644
index 0000000..12d0893
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rtz.c
@@ -0,0 +1,267 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8(vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4(vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_m(vbool32_t vm,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_m(vbool32_t vm,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_m(vbool16_t vm,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vm, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmacc.c
new file mode 100644
index 0000000..6f7928b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmacc.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmadd.c
new file mode 100644
index 0000000..97d2070
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmadd.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
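+// Note: vfnmadd computes vd = -(vd * vs1) - vs2 per element. In the checks
+// below, `i64 7` is read as the dynamic rounding-mode (frm) selector and
+// `i64 3` as the tail/mask-agnostic policy, assuming the standard RVV
+// intrinsic operand layout; the masked variants pass the mask after vs2.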
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsac.c
new file mode 100644
index 0000000..404b4f8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsac.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
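+// Note: vfnmsac computes vd = -(vs1 * vs2) + vd per element. As in the other
+// zvfbfa tests, the trailing `i64 7` and `i64 3` operands are read here as
+// the dynamic rounding mode (frm) and the tail/mask-agnostic policy,
+// assuming the standard RVV intrinsic operand layout.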
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsub.c
new file mode 100644
index 0000000..3a520dd
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsub.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
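+// Note: vfnmsub computes vd = -(vd * vs1) + vs2 per element; the masked
+// calls below are expected to select the @llvm.riscv.vfnmsub.mask.*
+// intrinsics, which take the mask operand after vs2, assuming the standard
+// RVV intrinsic operand layout.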
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrec7.c
new file mode 100644
index 0000000..462b6ac
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrec7.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4(vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2(vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1(vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2(vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4(vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8(vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsqrt7.c
new file mode 100644
index 0000000..051fde7
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsqrt7.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4(vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2(vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1(vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2(vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4(vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8(vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(mask, op1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsub.c
new file mode 100644
index 0000000..0494182
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsub.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnj.c
new file mode 100644
index 0000000..615dedd
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnj.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjn.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjn.c
new file mode 100644
index 0000000..a895e5f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjn.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
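As an illustrative aside, not taken from the patch: vfsgnjn copies the negated sign of its second operand onto the magnitude of the first, so calling it with the same vector in both positions negates every active element (the classic vfneg idiom). A minimal sketch using the overloaded intrinsic exercised by the tests above; negate_bf16 is a hypothetical helper, not part of this commit.

#include <riscv_vector.h>

// Hypothetical helper: vfsgnjn(v, v) flips the sign bit of every
// active element, i.e. elementwise negation of a bf16 vector.
vbfloat16m1_t negate_bf16(vbfloat16m1_t v, size_t vl) {
  return __riscv_vfsgnjn(v, v, vl);
}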
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjx.c
new file mode 100644
index 0000000..0187516
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjx.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
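Similarly, as an illustrative sketch rather than anything in the commit: vfsgnjx injects the XOR of the two operands' sign bits, so with the same vector in both positions the sign is always cleared, yielding elementwise absolute value (the vfabs idiom). abs_bf16 below is a hypothetical helper built on the overload tested above.

#include <riscv_vector.h>

// Hypothetical helper: sign(v) XOR sign(v) is 0, so vfsgnjx(v, v)
// produces |v| for every active element.
vbfloat16m1_t abs_bf16(vbfloat16m1_t v, size_t vl) {
  return __riscv_vfsgnjx(v, v, vl);
}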
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1down.c
new file mode 100644
index 0000000..4a76894
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1down.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4(vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2(vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1(vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2(vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4(vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8(vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(mask, src, value, vl);
+}
+
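For context (an illustrative sketch, not part of the patch): vfslide1down shifts each element to the previous index and writes the scalar operand into the last active element, which makes it convenient for streaming one new sample into a rolling window. push_sample is a hypothetical helper wrapping the exact overloaded signature the tests above exercise.

#include <riscv_vector.h>

// Hypothetical helper: window[i] = window[i+1] for i < vl-1,
// window[vl-1] = sample, i.e. a one-element rolling shift.
vbfloat16m1_t push_sample(vbfloat16m1_t window, __bf16 sample, size_t vl) {
  return __riscv_vfslide1down(window, sample, vl);
}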
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1up.c
new file mode 100644
index 0000000..f9f2dc0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1up.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4(vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2(vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1(vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2(vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4(vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8(vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(mask, src, value, vl);
+}
+
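Conversely (again an illustrative sketch, not from the commit): vfslide1up moves each element to the next index and writes the scalar into element 0, e.g. to prepend a boundary value before a stencil step. prepend_boundary is a hypothetical helper over the overload tested above.

#include <riscv_vector.h>

// Hypothetical helper: v[i] = v[i-1] for i >= 1, v[0] = boundary.
vbfloat16m1_t prepend_boundary(vbfloat16m1_t v, __bf16 boundary, size_t vl) {
  return __riscv_vfslide1up(v, boundary, vl);
}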
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsub.c
new file mode 100644
index 0000000..ebcf6fa
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsub.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
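To show how the overloaded vfsub above might appear in user code, here is a hedged strip-mining sketch, not part of the commit. It assumes the zvfbfmin-style bf16 load/store intrinsics (__riscv_vle16_v_bf16m1, overloaded __riscv_vse16) and __riscv_vsetvl_e16m1 are available alongside zvfbfa; vsub_bf16 is a hypothetical function.

#include <riscv_vector.h>

// Hypothetical strip-mined loop: c[i] = a[i] - b[i] over bf16 arrays.
void vsub_bf16(const __bf16 *a, const __bf16 *b, __bf16 *c, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e16m1(n - i);      // elements this pass
    vbfloat16m1_t va = __riscv_vle16_v_bf16m1(a + i, vl);
    vbfloat16m1_t vb = __riscv_vle16_v_bf16m1(b + i, vl);
    __riscv_vse16(c + i, __riscv_vfsub(va, vb, vl), vl);
    i += vl;                                      // advance by vl done
  }
}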
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwadd.c
new file mode 100644
index 0000000..124e7fb
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwadd.c
@@ -0,0 +1,893 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2(vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2(vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1(vfloat32m1_t vs2, vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1(vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2(vbfloat16m1_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2(vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2(vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4(vbfloat16m2_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4(vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4(vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8(vbfloat16m4_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8(vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8(vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm(vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm(vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm(vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm(vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm(vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm(vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm(vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm(vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm(vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm(vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm(vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm(vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm(vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwcvt.c
new file mode 100644
index 0000000..0399a63
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwcvt.c
@@ -0,0 +1,366 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4(vint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2(vint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1(vint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2(vint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4(vint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8(vint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4(vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2(vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1(vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2(vuint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4(vuint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8(vuint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_m(vbool64_t vm, vint8mf8_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_m(vbool32_t vm, vint8mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_m(vbool16_t vm, vint8mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_m(vbool8_t vm, vint8m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_m(vbool4_t vm, vint8m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_m(vbool2_t vm, vint8m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_m(vbool64_t vm, vuint8mf8_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_m(vbool32_t vm, vuint8mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_m(vbool16_t vm, vuint8mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_m(vbool8_t vm, vuint8m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_m(vbool4_t vm, vuint8m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_m(vbool2_t vm, vuint8m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f(vm, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmacc.c
new file mode 100644
index 0000000..2eb7fc8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmacc.c
@@ -0,0 +1,474 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmsac.c
new file mode 100644
index 0000000..28f5076
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmsac.c
@@ -0,0 +1,474 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmul.c
new file mode 100644
index 0000000..8de49fa
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmul.c
@@ -0,0 +1,451 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2(vbfloat16m1_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4(vbfloat16m2_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8(vbfloat16m4_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm(vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm(vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm(vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmacc.c
new file mode 100644
index 0000000..7836931
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmacc.c
@@ -0,0 +1,480 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmsac.c
new file mode 100644
index 0000000..ca936af
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmsac.c
@@ -0,0 +1,480 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwsub.c
new file mode 100644
index 0000000..2e22e22
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwsub.c
@@ -0,0 +1,893 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
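+// Overloaded bfloat16 widening-subtract tests for the experimental zvfbfa
+// extension. __riscv_vfwsub_vv/_vf widen two bf16 operands (vector-vector /
+// vector-scalar) to an f32 result; __riscv_vfwsub_wv/_wf take an already
+// widened f32 vs2. Each form is covered unmasked, masked (_m), with an
+// explicit rounding mode (_rm), and masked with a rounding mode (_rm_m).
+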
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2(vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2(vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1(vfloat32m1_t vs2, vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1(vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2(vbfloat16m1_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2(vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2(vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4(vbfloat16m2_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4(vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4(vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8(vbfloat16m4_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8(vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8(vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, vl);
+}
+
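+// Masked (_m) forms: the vbool mask is passed through as an extra
+// <vscale x N x i1> operand, the call switches to the .mask. intrinsic, and a
+// trailing policy operand of i64 3 is appended.
+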
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, vl);
+}
+
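+// Explicit rounding-mode (_rm) forms: passing __RISCV_FRM_RNE replaces the
+// dynamic frm operand (i64 7) with a static i64 0 in the generated call.
+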
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm(vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm(vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm(vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm(vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm(vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm(vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm(vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm(vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm(vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm(vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm(vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm(vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm(vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
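
The masked rounding-mode tests above all pass __RISCV_FRM_RNE, which lowers to the i64 0 frm operand in the checked IR; the trailing i64 3 encodes the tail-agnostic, mask-agnostic policy. For context, a minimal strip-mined caller of the overload exercised here might look like the sketch below. widen_sub and its arrays are illustrative, and the sketch assumes a toolchain with the experimental zvfbfa extension (and the bf16 load/store intrinsics) enabled.

#include <riscv_vector.h>
#include <stddef.h>

void widen_sub(const __bf16 *a, const __bf16 *b, float *out, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e16mf2(n - i);        // elements this pass
    vbfloat16mf2_t va = __riscv_vle16_v_bf16mf2(a + i, vl);
    vbfloat16mf2_t vb = __riscv_vle16_v_bf16mf2(b + i, vl);
    vbool32_t all = __riscv_vmset_m_b32(vl);         // all-true mask
    // Masked overload with explicit rounding mode, as exercised above; an
    // all-true mask sidesteps the poison inactive lanes implied by the
    // poison maskedoff operand visible in the checked IR.
    vfloat32m1_t vr = __riscv_vfwsub_vv(all, va, vb, __RISCV_FRM_RNE, vl);
    __riscv_vse32_v_f32m1(out + i, vr, vl);
    i += vl;
  }
}
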
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfeq.c
new file mode 100644
index 0000000..29881c9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfeq.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
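
Each comparison above returns an ordinary mask register (a vbool*_t whose ratio matches SEW/LMUL of the bf16 operand), so the results compose directly with the generic mask intrinsics. A hedged sketch of one such use; count_equal and its parameters are purely illustrative:

#include <riscv_vector.h>
#include <stddef.h>

// Count the elements of a[0..n) equal to key, using the vf form of
// vmfeq shown above plus a mask population count.
size_t count_equal(const __bf16 *a, __bf16 key, size_t n) {
  size_t total = 0;
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e16m1(n - i);
    vbfloat16m1_t va = __riscv_vle16_v_bf16m1(a + i, vl);
    vbool16_t eq = __riscv_vmfeq(va, key, vl); // bf16m1 pairs with b16
    total += __riscv_vcpop_m_b16(eq, vl);      // set bits among first vl
    i += vl;
  }
  return total;
}
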
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfge.c
new file mode 100644
index 0000000..b8083c5e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfge.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
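
Note that the masked non-policy overloads above pass poison as the maskedoff operand, so inactive lanes of their result are poison; to AND two conditions it is simpler to combine unmasked compares with vmand.mm. A small sketch under that reading, where in_range and its arguments are illustrative:

#include <riscv_vector.h>
#include <stddef.h>

// Mask of lanes where lo <= va[i] <= hi, built from two unmasked
// compares; vmfle is exercised by a sibling test file below.
vbool16_t in_range(vbfloat16m1_t va, __bf16 lo, __bf16 hi, size_t vl) {
  vbool16_t ge = __riscv_vmfge(va, lo, vl);
  vbool16_t le = __riscv_vmfle(va, hi, vl);
  return __riscv_vmand_mm_b16(ge, le, vl);
}
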
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfgt.c
new file mode 100644
index 0000000..b8749b3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfgt.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfle.c
new file mode 100644
index 0000000..724608c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfle.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmflt.c
new file mode 100644
index 0000000..1b0b898
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmflt.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfne.c
new file mode 100644
index 0000000..672c150
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfne.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfadd.c
new file mode 100644
index 0000000..6d55279
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfadd.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
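+// The _tum variants below lower to the masked @llvm.riscv.vfadd.mask.*
+// intrinsics, whose trailing i64 operands are, as these checks read, the
+// rounding mode (7 = dynamic frm), the vector length, and the policy
+// (2 = tail undisturbed, mask agnostic in LLVM's vector-policy encoding).
+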
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
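+// The _tumu variants use policy operand 0 (tail undisturbed, mask
+// undisturbed): both tail and inactive elements come from maskedoff.
+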
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
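+// The _mu variants use policy operand 1 (tail agnostic, mask undisturbed):
+// only masked-off elements are preserved from maskedoff.
+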
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfclass.c
new file mode 100644
index 0000000..8e6946d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfclass.c
@@ -0,0 +1,272 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
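+// vfclass does not round, so its lowered intrinsics carry no frm operand:
+// the unmasked _tu form takes (vd, vs2, vl) and the masked forms take
+// (vd, vs2, mask, vl, policy), with the same policy values (tum = 2,
+// tumu = 0, mu = 1) seen in the vfadd tests.
+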
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1bf16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_tu(vuint16mf4_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf4_u16mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2bf16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_tu(vuint16mf2_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf2_u16mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4bf16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_tu(vuint16m1_t vd, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16m1_u16m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8bf16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_tu(vuint16m2_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16m2_u16m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16bf16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_tu(vuint16m4_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16m4_u16m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32bf16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_tu(vuint16m8_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16m8_u16m8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1bf16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf4_u16mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2bf16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf2_u16mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4bf16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m1_u16m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8bf16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_tum(vbool8_t vm, vuint16m2_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m2_u16m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16bf16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_tum(vbool4_t vm, vuint16m4_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m4_u16m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32bf16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_tum(vbool2_t vm, vuint16m8_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m8_u16m8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1bf16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf4_u16mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2bf16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf2_u16mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4bf16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_tumu(vbool16_t vm, vuint16m1_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m1_u16m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8bf16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_tumu(vbool8_t vm, vuint16m2_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m2_u16m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16bf16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_tumu(vbool4_t vm, vuint16m4_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m4_u16m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32bf16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_tumu(vbool2_t vm, vuint16m8_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m8_u16m8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1bf16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf4_u16mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2bf16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf2_u16mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4bf16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_mu(vbool16_t vm, vuint16m1_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m1_u16m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8bf16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_mu(vbool8_t vm, vuint16m2_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m2_u16m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16bf16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_mu(vbool4_t vm, vuint16m4_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m4_u16m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32bf16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_mu(vbool2_t vm, vuint16m8_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m8_u16m8_mu(vm, vd, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmacc.c
new file mode 100644
index 0000000..2d4e481
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmacc.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
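+// vfmacc is a ternary multiply-add, so vd is both the accumuland and the
+// passthrough; note that even the unmasked _tu form carries a trailing
+// policy operand (i64 2) after the dynamic rounding mode (i64 7), which
+// appears to be how tail policy is conveyed for ternary intrinsics.
+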
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m1_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m8_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m1_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m1_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m8_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m8_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m1_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m1_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m8_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m8_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmadd.c
new file mode 100644
index 0000000..511e073
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmadd.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
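+
+// Note on the naming scheme (a reader's guide, not part of the generated
+// assertions): the _tu/_tum/_tumu/_mu suffixes select the tail/mask policy,
+// and the trailing i64 policy operand in the checked IR encodes it as a
+// bitmask (bit 0 = tail agnostic, bit 1 = mask agnostic) -- so _tu and _tum
+// lower to 2, _tumu to 0, and _mu to 1, as the calls below show. The i64 7
+// operand passed before VL requests the dynamic rounding mode (frm = DYN).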
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m1_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m8_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m1_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m1_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m8_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m8_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m1_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m1_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m8_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m8_mu(mask, vd, rs1, vs2, vl);
+}
+
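The vfmadd checks above make the lowering pattern visible: each masked call ends with a rounding-mode operand (i64 7, the dynamic frm) followed by a tail/mask policy immediate, with _tumu lowering to policy 0 and _mu to policy 1; by the same pattern shown for vfmax below, _tum lowers to policy 2 and _tu selects the unmasked intrinsic. A minimal caller sketch, not part of the generated patch, assuming a clang built with the RUN-line features (+v, +experimental-zvfbfa); the _tu/_tum names are assumed from that pattern rather than quoted from this hunk:

#include <riscv_vector.h>

// Exercises the four tail/mask policy suffixes of the bf16 vfmadd intrinsic.
// vd is both the accumulator and the passthru for undisturbed elements.
vbfloat16m1_t fmadd_policies(vbool16_t mask, vbfloat16m1_t vd,
                             vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
  vbfloat16m1_t r;
  r = __riscv_vfmadd_vv_bf16m1_tu(vd, vs1, vs2, vl);        // unmasked intrinsic
  r = __riscv_vfmadd_vv_bf16m1_tum(mask, r, vs1, vs2, vl);  // policy immediate 2
  r = __riscv_vfmadd_vv_bf16m1_tumu(mask, r, vs1, vs2, vl); // policy immediate 0
  r = __riscv_vfmadd_vv_bf16m1_mu(mask, r, vs1, vs2, vl);   // policy immediate 1
  return r;
}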
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmax.c
new file mode 100644
index 0000000..f3698d4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmax.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
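Note how the vfmax checks differ from vfmadd: vfmax carries no rounding-mode operand, its _tu variant lowers to the unmasked llvm.riscv.vfmax with maskedoff as the passthru, and only the masked variants (_tum = 2, _tumu = 0, _mu = 1) append a policy immediate. A short sketch of the tu/tum pairing, reusing intrinsic names checked above; not part of the generated patch:

#include <riscv_vector.h>

// tu: unmasked intrinsic, `off` fills the tail; tum: masked intrinsic with
// policy 2 (tail undisturbed, mask agnostic).
vbfloat16mf4_t fmax_tu_then_tum(vbool64_t m, vbfloat16mf4_t off,
                                vbfloat16mf4_t a, vbfloat16mf4_t b, size_t vl) {
  vbfloat16mf4_t x = __riscv_vfmax_vv_bf16mf4_tu(off, a, b, vl);
  return __riscv_vfmax_vv_bf16mf4_tum(m, x, a, b, vl);
}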
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmerge.c
new file mode 100644
index 0000000..bcaf2cb
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmerge.c
@@ -0,0 +1,69 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmerge_vfm_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmerge.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmerge_vfm_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, vbool64_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16mf4_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmerge_vfm_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmerge.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmerge_vfm_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, vbool32_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16mf2_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmerge_vfm_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmerge.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmerge_vfm_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, vbool16_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16m1_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmerge_vfm_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmerge.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmerge_vfm_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, vbool8_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16m2_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmerge_vfm_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmerge.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmerge_vfm_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, vbool4_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16m4_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmerge_vfm_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmerge.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmerge_vfm_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, vbool2_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16m8_tu(maskedoff, op1, op2, mask, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmin.c
new file mode 100644
index 0000000..911f879
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmin.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsac.c
new file mode 100644
index 0000000..9575ad3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsac.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m1_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m8_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m1_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m1_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m8_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m8_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m1_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m1_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m8_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m8_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsub.c
new file mode 100644
index 0000000..8e382f71
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsub.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m1_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m8_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m1_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m1_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m8_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m8_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m1_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m1_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m8_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m8_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmul.c
new file mode 100644
index 0000000..716f056
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmul.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
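+// Note: the suffix on each test names the tail/mask policy variant. For the
+// masked intrinsics this corresponds to the trailing i64 policy operand in
+// the checked IR (0 = tail and mask undisturbed `_tumu`, 1 = mask
+// undisturbed `_mu`, 2 = tail undisturbed `_tum`); the unmasked `_tu` forms
+// carry no policy operand and express tail-undisturbed by passing
+// `maskedoff` as the passthru. The `i64 7` before the VL operand is the frm
+// argument, which is assumed here to select dynamic rounding (FRM = DYN).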
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmv.c
new file mode 100644
index 0000000..069ee6a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmv.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
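+// Note: `vfmv_v_f` splats the scalar `src` across the destination's active
+// elements, while `vfmv_s_f` writes only element 0. Only the `_tu`
+// (tail-undisturbed) variants are exercised in this file, so every call
+// passes `maskedoff` through as the passthru operand.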
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmv_v_f_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.v.f.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmv_v_f_bf16mf4_tu(vbfloat16mf4_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16mf4_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmv_v_f_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmv.v.f.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmv_v_f_bf16mf2_tu(vbfloat16mf2_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16mf2_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmv_v_f_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmv.v.f.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmv_v_f_bf16m1_tu(vbfloat16m1_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16m1_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmv_v_f_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmv.v.f.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmv_v_f_bf16m2_tu(vbfloat16m2_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16m2_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmv_v_f_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmv.v.f.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmv_v_f_bf16m4_tu(vbfloat16m4_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16m4_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmv_v_f_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmv.v.f.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmv_v_f_bf16m8_tu(vbfloat16m8_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16m8_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmv_s_f_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.s.f.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmv_s_f_bf16mf4_tu(vbfloat16mf4_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16mf4_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmv_s_f_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmv.s.f.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmv_s_f_bf16mf2_tu(vbfloat16mf2_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16mf2_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmv_s_f_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmv.s.f.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmv_s_f_bf16m1_tu(vbfloat16m1_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16m1_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmv_s_f_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmv.s.f.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmv_s_f_bf16m2_tu(vbfloat16m2_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16m2_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmv_s_f_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmv.s.f.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmv_s_f_bf16m4_tu(vbfloat16m4_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16m4_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmv_s_f_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmv.s.f.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmv_s_f_bf16m8_tu(vbfloat16m8_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16m8_tu(maskedoff, src, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt.c
new file mode 100644
index 0000000..36d4fc3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt.c
@@ -0,0 +1,1577 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
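+// Note: `vfncvt_x_f_w` / `vfncvt_xu_f_w` narrow each bf16 source element to
+// a signed / unsigned integer of half the element width (e.g. bf16mf4 ->
+// i8mf8); the `i64 7` operand before VL is assumed to request dynamic
+// rounding (FRM = DYN).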
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_tu(vint8mf8_t vd, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_tu(vint8mf4_t vd, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_tu(vint8mf2_t vd, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_tu(vint8m1_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_tu(vint8m2_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_tu(vint8m4_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_tu(vuint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_tu(vuint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_tu(vuint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_tu(vuint8m1_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_tu(vuint8m2_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_tu(vuint8m4_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_tu(vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_tu(vbfloat16mf2_t vd, vfloat32m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_tu(vbfloat16m1_t vd, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_tu(vbfloat16m2_t vd, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_tu(vbfloat16m4_t vd, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_tum(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_tum(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_tum(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_tum(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_tum(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_tum(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_tum(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_tum(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_tum(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_tumu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_tumu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_tumu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_tumu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_tumu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_tumu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_mu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_mu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_mu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_mu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_mu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_mu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_mu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_mu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_mu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_mu(vm, vd, vs2, vl);
+}
+
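+// The _rm test variants below pass the rounding mode explicitly as an frm
+// argument; __RISCV_FRM_RNE (round to nearest, ties to even, encoding 0)
+// therefore appears as an i64 0 operand in the lowered intrinsic calls, in
+// place of the i64 7 (dynamic) used by the variants above.
+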
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tu(vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tu(vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tu(vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_tu(vint8m1_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_tu(vint8m2_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_tu(vint8m4_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tu(vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tu(vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tu(vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tu(vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tu(vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tu(vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tu(vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_tu(vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_tu(vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_tu(vbfloat16m1_t vd, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_tu(vbfloat16m2_t vd, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_tu(vbfloat16m4_t vd, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tum(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tum(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tum(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_tum(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_tum(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_tum(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tum(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tum(vm, vd, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tum(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tum(vm, vd, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tum(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tum(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tum(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tum(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tumu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tumu(vm, vd, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tumu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tumu(vm, vd, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tumu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_tumu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_tumu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_tumu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tumu(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tumu(vm, vd, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tumu(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tumu(vm, vd, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tumu(vbool16_t vm,
+ vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tumu(vm, vd, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tumu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tumu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tumu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_tumu(vbool64_t vm,
+ vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_tumu(vbool32_t vm,
+ vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_mu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_mu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_mu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_mu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_mu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_mu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_mu(vbool64_t vm, vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_mu(vbool32_t vm, vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_mu(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_mu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_mu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_mu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rod.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rod.c
new file mode 100644
index 0000000..8406684
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rod.c
@@ -0,0 +1,233 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_tu(vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_tu(vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_tu(vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_tu(vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_tu(vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_tum(vbool64_t vm,
+ vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_tum(vbool32_t vm,
+ vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_tumu(vbool64_t vm,
+ vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_tumu(vbool32_t vm,
+ vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m4_mu(vm, vd, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rtz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rtz.c
new file mode 100644
index 0000000..4644eff
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rtz.c
@@ -0,0 +1,572 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
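+
+// Editorial annotation (assumed encoding, not part of the autogenerated
+// assertions): the `_tu`, `_tum`, `_tumu`, and `_mu` suffixes select the
+// tail/mask policy. In the checks below, unmasked `_tu` calls lower to the
+// plain intrinsic with the passthru in `vd`, while the masked variants
+// append a policy immediate: 2 (mask-agnostic) for `_tum`, 0 (both
+// undisturbed) for `_tumu`, and 1 (tail-agnostic) for `_mu`.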
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tu(vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tu(vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tu(vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tu(vint8m1_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m2_i8m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tu(vint8m2_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m4_i8m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tu(vint8m4_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m8_i8m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tu(vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tu(vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tu(vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tu(vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tu(vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tu(vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tum(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tum(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tum(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tum(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m2_i8m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tum(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m4_i8m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tum(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m8_i8m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tum(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tum(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tum(vbool16_t vm,
+ vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tum(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tum(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tum(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tumu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m2_i8m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tumu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m4_i8m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tumu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m8_i8m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tumu(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tumu(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tumu(vbool16_t vm,
+ vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tumu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tumu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tumu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_mu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_mu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_mu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_mu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m2_i8m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_mu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m4_i8m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_mu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m8_i8m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_mu(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_mu(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_mu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m2_u8m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_mu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m4_u8m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_mu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m8_u8m4_mu(vm, vd, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmacc.c
new file mode 100644
index 0000000..93fd6ba
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmacc.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
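+
+// Editorial annotation (assumed encoding, not part of the autogenerated
+// assertions): vfnmacc is a fused multiply-accumulate, so every lowered
+// call below carries an extra `i64 7` operand ahead of the VL; 7 selects
+// the dynamic rounding mode (frm CSR). The final immediate is the
+// tail/mask policy, as in the other zvfbfa tests.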
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m1_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m8_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m1_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m1_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m8_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m8_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m1_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m1_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m8_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m8_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmadd.c
new file mode 100644
index 0000000..d7e6b82
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmadd.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m1_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m8_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m1_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m1_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m8_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m8_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m1_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m1_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m8_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m8_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsac.c
new file mode 100644
index 0000000..e0c289d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsac.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m1_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m8_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m1_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m1_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m8_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m8_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m1_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m1_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m8_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m8_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsub.c
new file mode 100644
index 0000000..05ccda3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsub.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m1_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m8_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m1_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m1_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m8_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m8_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m1_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m1_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m8_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m8_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrec7.c
new file mode 100644
index 0000000..3123692
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrec7.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf4_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf2_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m1_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m2_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m4_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m8_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf4_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf2_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m1_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m2_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m4_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m8_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf4_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf2_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m1_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m2_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m4_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m8_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf4_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf2_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m1_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m2_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m4_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m8_mu(mask, maskedoff, op1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsqrt7.c
new file mode 100644
index 0000000..8436f0e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsqrt7.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf4_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf2_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m1_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m2_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m4_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m8_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf4_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf2_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m1_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m2_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m4_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m8_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf4_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf2_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m1_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m2_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m4_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m8_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf4_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf2_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m1_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m2_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m4_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m8_mu(mask, maskedoff, op1, vl);
+}
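+// Note on the masked vfrsqrt7 checks above: the final i64 operand of
+// @llvm.riscv.vfrsqrt7.mask.* is the tail/mask policy (tumu = 0,
+// mu = 1, tum = 2), matching the intrinsic suffix; this 7-bit
+// reciprocal square-root estimate takes no rounding-mode operand.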
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsub.c
new file mode 100644
index 0000000..7dd2bb6
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsub.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
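+
+// The suffix on each test names the tail/mask policy variant of the
+// intrinsic: _tu (tail undisturbed), _tum (tail undisturbed, mask
+// agnostic), _tumu (tail and mask undisturbed), _mu (mask undisturbed).
+// In the masked @llvm.riscv.vfrsub.mask.* calls below this shows up as
+// the trailing policy operand (tumu = 0, mu = 1, tum = 2). The constant
+// i64 7 before the VL operand is the floating-point rounding mode;
+// 7 selects dynamic rounding from the frm CSR.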
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnj.c
new file mode 100644
index 0000000..b39a0be
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnj.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
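+
+// vfsgnj only copies sign bits, so unlike vfrsub these intrinsics take
+// no rounding-mode operand; the masked forms carry just the trailing
+// tail/mask policy operand (tumu = 0, mu = 1, tum = 2) after VL,
+// matching the _tu/_tum/_tumu/_mu suffixes.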
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjn.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjn.c
new file mode 100644
index 0000000..7542e78
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjn.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
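+
+// Note (reading aid, not autogenerated): vfsgnjn copies the *negated* sign of
+// op2 onto the magnitude of op1, so vfsgnjn(x, x, ...) is a vector fneg. The
+// _tu/_tum/_tumu/_mu suffixes are the standard RVV policy variants: unmasked
+// _tu forms lower to the plain intrinsic with a merge (maskedoff) operand,
+// while masked forms lower to the .mask intrinsic with a trailing policy
+// operand whose bit 0 means tail-agnostic and bit 1 means mask-agnostic --
+// hence i64 2 for _tum, i64 0 for _tumu, and i64 1 for _mu in the checks below.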
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjx.c
new file mode 100644
index 0000000..104149e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjx.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
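+
+// Note (reading aid, not autogenerated): vfsgnjx XORs the sign bits of op1 and
+// op2, so vfsgnjx(x, x, ...) is a vector fabs. Policy suffixes and the
+// trailing policy operand on the .mask intrinsics are encoded the same way as
+// in vfsgnjn.c above.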
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1down.c
new file mode 100644
index 0000000..228dc1cd
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1down.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf4_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf2_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m1_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m2_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m4_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m8_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf4_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf2_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m1_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m2_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m4_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m8_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf4_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf2_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m1_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m2_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m4_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m8_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf4_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf2_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m1_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m2_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m4_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m8_mu(mask, maskedoff, src, value, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1up.c
new file mode 100644
index 0000000..9e6ff2b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1up.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf4_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf2_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m1_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m2_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m4_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m8_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf4_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf2_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m1_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m2_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m4_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m8_tum(mask, maskedoff, src, value, vl);
+}
+
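+// Note: the trailing i64 operand of the masked vfslide1up intrinsic calls in
+// this file is the vector tail/mask policy, not data: 2 = tail undisturbed,
+// mask agnostic (_tum); 0 = tail undisturbed, mask undisturbed (_tumu);
+// 1 = tail agnostic, mask undisturbed (_mu).
+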
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf4_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf2_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m1_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m2_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m4_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m8_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf4_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf2_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m1_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m2_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m4_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m8_mu(mask, maskedoff, src, value, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsub.c
new file mode 100644
index 0000000..b6fd94e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsub.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
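+// This file exercises the policy variants of the bf16 vfsub intrinsics
+// (_tu, _tum, _tumu, _mu). In the generated IR, the i64 7 operand is the
+// floating-point rounding mode (7 = dynamic, read from the frm CSR); for the
+// masked forms, the final i64 operand encodes the tail/mask policy.
+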
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwadd.c
new file mode 100644
index 0000000..4bee376
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwadd.c
@@ -0,0 +1,2007 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
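+// vfwadd widens bf16 sources to f32 results. The _vv/_vf forms add two
+// narrow operands (vector + vector, vector + scalar); the _wv/_wf forms add
+// a narrow operand to an already-widened f32 operand. The i64 7 operand in
+// each call selects the dynamic rounding mode.
+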
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwcvt.c
new file mode 100644
index 0000000..9151319
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwcvt.c
@@ -0,0 +1,765 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_tu(vbfloat16mf4_t vd, vint8mf8_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_tu(vbfloat16mf2_t vd, vint8mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_tu(vbfloat16m1_t vd, vint8mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_tu(vbfloat16m2_t vd, vint8m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_tu(vbfloat16m4_t vd, vint8m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_tu(vbfloat16m8_t vd, vint8m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_tu(vbfloat16mf4_t vd, vuint8mf8_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_tu(vbfloat16mf2_t vd, vuint8mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_tu(vbfloat16m1_t vd, vuint8mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_tu(vbfloat16m2_t vd, vuint8m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_tu(vbfloat16m4_t vd, vuint8m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_tu(vbfloat16m8_t vd, vuint8m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf4_f32mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf2_f32m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m1_f32m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m2_f32m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m4_f32m8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ vint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ vint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd,
+ vint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vuint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vuint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd,
+ vuint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf4_f32mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf2_f32m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m1_f32m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m2_f32m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m4_f32m8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd,
+ vint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd,
+ vint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd,
+ vint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd,
+ vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd,
+ vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vuint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vuint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd,
+ vuint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf4_f32mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf2_f32m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m1_f32m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m2_f32m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m4_f32m8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd,
+ vint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m8_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vuint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vuint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd,
+ vuint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m8_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf4_f32mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf2_f32m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m1_f32m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m2_f32m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m4_f32m8_mu(vm, vd, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmacc.c
new file mode 100644
index 0000000..f67b100
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmacc.c
@@ -0,0 +1,1017 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
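+
+// Annotation (not part of the autogenerated output): the tests below follow
+// the standard RVV intrinsic policy-suffix scheme, which the CHECK lines make
+// visible in the trailing operands of each @llvm.riscv.vfwmacc call:
+//   _tu    tail undisturbed, unmasked            -> policy immediate 2
+//   _tum   tail undisturbed, masked              -> policy immediate 2
+//   _tumu  tail undisturbed, mask undisturbed    -> policy immediate 0
+//   _mu    mask undisturbed                      -> policy immediate 1
+// (bit 0 = tail agnostic, bit 1 = mask agnostic, so 0 means fully
+// undisturbed). The operand before vl is the rounding-mode immediate: the
+// plain variants pass 7 (dynamic frm), while the _rm variants further down
+// take an explicit mode argument and these tests pass __RISCV_FRM_RNE (0).
+// CHECK lines can be regenerated with utils/update_cc_test_checks.py, as
+// noted in the file header.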
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_tum(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_tumu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmsac.c
new file mode 100644
index 0000000..6d78c74
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmsac.c
@@ -0,0 +1,1017 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
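+// The policy suffix of each test maps to the trailing i64 operand of the
+// llvm.riscv.vfwmsac intrinsics in the CHECK lines: _tumu -> 0, _mu -> 1,
+// _tu/_tum -> 2 (bit 0 = tail agnostic, bit 1 = mask agnostic). The i64
+// operand before the VL is the rounding mode: 7 (dynamic frm) for the plain
+// variants, or the explicit __RISCV_FRM_* value (RNE == 0) for _rm variants.
+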
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_tum(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_tumu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmul.c
new file mode 100644
index 0000000..9fcfe81
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmul.c
@@ -0,0 +1,1015 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmacc.c
new file mode 100644
index 0000000..73cc822
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmacc.c
@@ -0,0 +1,1034 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
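+// The tests below each wrap a single __riscv_vfwnmacc_* builtin so the
+// autogenerated CHECK lines can verify it lowers to the corresponding
+// @llvm.riscv.vfwnmacc[.mask] intrinsic. Reading the checked IR, the
+// trailing i64 operands appear to encode the rounding mode (7, i.e.
+// dynamic, since these variants take no explicit frm argument) and the
+// tail/mask policy (_tu/_tum -> 2, _tumu -> 0, _mu -> 1 in this file).
+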
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tum(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tumu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm,
+ vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm,
+ vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_mu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmsac.c
new file mode 100644
index 0000000..6133230
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmsac.c
@@ -0,0 +1,1034 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl);
+}
+
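+// Note on the IR checked above and below (an orientation comment, not an
+// autogenerated check line): in each @llvm.riscv.vfwnmsac.* call the first
+// trailing i64 is the rounding mode (7 = dynamic frm; the _rm_ tests below
+// pass __RISCV_FRM_RNE = 0 explicitly), and in the masked forms the final
+// i64 is the tail/mask policy (bit 0 = tail agnostic, bit 1 = mask agnostic;
+// hence 2 for _tum, 0 for _tumu, and 1 for _mu).
+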
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tum(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tumu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm,
+ vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm,
+ vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_mu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwsub.c
new file mode 100644
index 0000000..9d9b0b0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwsub.c
@@ -0,0 +1,2007 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
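+// Orientation comment (not an autogenerated check line): tests are named
+// test_vfwsub_<vv|vf|wv|wf>_<bf16 source type>_<f32 result type>_<policy>.
+// The vv/vf forms widen two bf16 operands (vector-vector / vector-scalar),
+// while wv/wf take an already-widened f32 first source. FileCheck verifies
+// lowering to the matching @llvm.riscv.vfwsub[.w] intrinsic, with a trailing
+// rounding-mode operand of 7 (dynamic) followed by vl.
+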
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_mu(vm, vd, vs2, rs1, vl);
+}
+
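+// The _rm variants below pass an explicit rounding-mode argument;
+// __RISCV_FRM_RNE lowers to the i64 0 frm operand in the CHECK lines. The
+// unmasked _rm_tu form keeps vd as the tail-undisturbed passthru and carries
+// no mask or policy operand.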
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
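+// _rm_tum: explicit frm (i64 0) combined with policy i64 2 (tail undisturbed,
+// mask agnostic).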
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
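+// _rm_tumu: policy i64 0, i.e. both tail and mask undisturbed.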
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
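+// _rm_mu: policy i64 1 (tail agnostic, mask undisturbed), as in the
+// dynamic-frm _mu tests above, now combined with the explicit frm operand.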
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfeq.c
new file mode 100644
index 0000000..b96aae5
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfeq.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfge.c
new file mode 100644
index 0000000..47d0427
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfge.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfgt.c
new file mode 100644
index 0000000..0a0ead2
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfgt.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfle.c
new file mode 100644
index 0000000..27ddefe
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfle.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmflt.c
new file mode 100644
index 0000000..d5f4f77
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmflt.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
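Note that, unlike the vfadd tests later in this patch, the compare intrinsics (vmflt, vmfle, vmfne) take no rounding-mode or tail-policy operand in the generated IR: mask-producing operations only offer the mask-undisturbed policy. A sketch, not part of the patch, of a typical consumer of the vmfne results; it assumes the matching non-policy bf16 intrinsic added elsewhere in this patch and the standard RVV mask-popcount intrinsic `__riscv_vcpop_m_b16`:

// Minimal sketch (not part of the patch): count lanes where two bf16
// vectors differ by feeding the vmfne mask into vcpop.
#include <riscv_vector.h>

size_t count_mismatches(vbfloat16m1_t a, vbfloat16m1_t b, size_t vl) {
  vbool16_t ne = __riscv_vmfne_vv_bf16m1_b16(a, b, vl); // unmasked compare
  return __riscv_vcpop_m_b16(ne, vl);                   // popcount of set lanes
}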
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfne.c
new file mode 100644
index 0000000..c2df947
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfne.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
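The overloaded policy tests below resolve the suffix-free names (`__riscv_vfadd_tu`, `_tum`, `_tumu`, `_mu`) by operand types rather than by full type-mangled names. In the CHECK lines, the `i64 7` operand is the floating-point rounding-mode argument (7 selects the dynamic frm), and the trailing `i64` operand encodes the tail/mask policy, which is why the `_tum`, `_tumu`, and `_mu` variants emit 2, 0, and 1 respectively while the `_tu` variants call the unmasked intrinsic with no policy operand. A usage sketch, not part of the patch, under the same toolchain assumptions as the RUN lines:

// Minimal sketch (not part of the patch): the overloaded "_tum" form keeps
// tail elements from `acc` (tail-undisturbed) and leaves masked-off lanes
// agnostic; the call resolves on the operand types.
#include <riscv_vector.h>

vbfloat16m1_t masked_add(vbool16_t mask, vbfloat16m1_t acc,
                         vbfloat16m1_t x, vbfloat16m1_t y, size_t vl) {
  return __riscv_vfadd_tum(mask, acc, x, y, vl);
}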
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfadd.c
new file mode 100644
index 0000000..2bd3b39
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfadd.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfclass.c
new file mode 100644
index 0000000..e2a993a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfclass.c
@@ -0,0 +1,272 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1bf16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_tu(vuint16mf4_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2bf16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_tu(vuint16mf2_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4bf16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_tu(vuint16m1_t vd, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8bf16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_tu(vuint16m2_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16bf16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_tu(vuint16m4_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32bf16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_tu(vuint16m8_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1bf16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2bf16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4bf16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfclass_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8bf16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_tum(vbool8_t vm, vuint16m2_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfclass_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16bf16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_tum(vbool4_t vm, vuint16m4_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfclass_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32bf16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_tum(vbool2_t vm, vuint16m8_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfclass_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1bf16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2bf16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4bf16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_tumu(vbool16_t vm, vuint16m1_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfclass_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8bf16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_tumu(vbool8_t vm, vuint16m2_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfclass_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16bf16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_tumu(vbool4_t vm, vuint16m4_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfclass_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32bf16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_tumu(vbool2_t vm, vuint16m8_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfclass_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1bf16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2bf16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4bf16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_mu(vbool16_t vm, vuint16m1_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfclass_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8bf16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_mu(vbool8_t vm, vuint16m2_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfclass_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16bf16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_mu(vbool4_t vm, vuint16m4_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfclass_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32bf16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_mu(vbool2_t vm, vuint16m8_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfclass_mu(vm, vd, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmacc.c
new file mode 100644
index 0000000..eb74271
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmacc.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
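The _mu overloads above lower to the masked llvm.riscv.vfmacc.mask intrinsics with policy operand 1 (tail agnostic, mask undisturbed): lanes whose mask bit is clear keep the value of vd. A minimal usage sketch under the same assumptions as the RUN lines (experimental zvfbfa target feature; the helper name is illustrative, not part of the test):

#include <riscv_vector.h>

// acc[i] = a[i] * b[i] + acc[i] for active lanes i < vl; lanes whose
// mask bit is clear keep their previous acc value (mask-undisturbed).
static vbfloat16m1_t bf16_masked_fma_step(vbool16_t mask, vbfloat16m1_t acc,
                                          vbfloat16m1_t a, vbfloat16m1_t b,
                                          size_t vl) {
  return __riscv_vfmacc_mu(mask, acc, a, b, vl);
}
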
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmadd.c
new file mode 100644
index 0000000..68d490d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmadd.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
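Per the RVV fused multiply-add definitions, vfmadd multiplies by the destination operand, vd[i] = vs1[i] * vd[i] + vs2[i], whereas vfmacc computes vd[i] = vs1[i] * vs2[i] + vd[i]; the tests above vary only the policy suffix. A short sketch of the tail-undisturbed (_tu) overload, again assuming the experimental zvfbfa feature (the helper name is illustrative):

#include <riscv_vector.h>

// vd[i] = vs1[i] * vd[i] + vs2[i] for i < vl; elements past vl keep
// their old vd value under the _tu (tail-undisturbed) policy.
static vbfloat16m1_t bf16_madd_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1,
                                  vbfloat16m1_t vs2, size_t vl) {
  return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
}
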
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmax.c
new file mode 100644
index 0000000..5f682e8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmax.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmerge.c
new file mode 100644
index 0000000..9593ad5
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmerge.c
@@ -0,0 +1,69 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmerge_vfm_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmerge.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmerge_vfm_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, vbool64_t mask, size_t vl) {
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmerge_vfm_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmerge.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmerge_vfm_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, vbool32_t mask, size_t vl) {
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmerge_vfm_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmerge.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmerge_vfm_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, vbool16_t mask, size_t vl) {
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmerge_vfm_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmerge.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmerge_vfm_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, vbool8_t mask, size_t vl) {
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmerge_vfm_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmerge.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmerge_vfm_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, vbool4_t mask, size_t vl) {
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmerge_vfm_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmerge.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmerge_vfm_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, vbool2_t mask, size_t vl) {
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmin.c
new file mode 100644
index 0000000..f3ef3c3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmin.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsac.c
new file mode 100644
index 0000000..0587c57
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsac.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsub.c
new file mode 100644
index 0000000..2ad26f8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsub.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmul.c
new file mode 100644
index 0000000..d1e726a9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmul.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmv.c
new file mode 100644
index 0000000..9fd1ffc
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmv.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmv_v_f_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.v.f.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmv_v_f_bf16mf4_tu(vbfloat16mf4_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmv_v_f_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmv.v.f.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmv_v_f_bf16mf2_tu(vbfloat16mf2_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmv_v_f_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmv.v.f.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmv_v_f_bf16m1_tu(vbfloat16m1_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmv_v_f_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmv.v.f.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmv_v_f_bf16m2_tu(vbfloat16m2_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmv_v_f_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmv.v.f.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmv_v_f_bf16m4_tu(vbfloat16m4_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmv_v_f_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmv.v.f.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmv_v_f_bf16m8_tu(vbfloat16m8_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmv_s_f_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.s.f.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmv_s_f_bf16mf4_tu(vbfloat16mf4_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmv_s_f_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmv.s.f.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmv_s_f_bf16mf2_tu(vbfloat16mf2_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmv_s_f_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmv.s.f.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmv_s_f_bf16m1_tu(vbfloat16m1_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmv_s_f_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmv.s.f.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmv_s_f_bf16m2_tu(vbfloat16m2_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmv_s_f_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmv.s.f.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmv_s_f_bf16m4_tu(vbfloat16m4_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmv_s_f_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmv.s.f.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmv_s_f_bf16m8_tu(vbfloat16m8_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt.c
new file mode 100644
index 0000000..c6cd0a5
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt.c
@@ -0,0 +1,1539 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_tu(vint8mf8_t vd, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_tu(vint8mf4_t vd, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_tu(vint8mf2_t vd, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_tu(vint8m1_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_tu(vint8m2_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_tu(vint8m4_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_tu(vuint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_tu(vuint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_tu(vuint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_tu(vuint8m1_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_tu(vuint8m2_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_tu(vuint8m4_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_tu(vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_tu(vbfloat16mf2_t vd, vfloat32m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_tu(vbfloat16m1_t vd, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_tu(vbfloat16m2_t vd, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_tu(vbfloat16m4_t vd, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_tum(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_tum(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_tum(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_tum(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_tum(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_tum(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_tum(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_tum(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_tum(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_tumu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_tumu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_tumu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_tumu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_tumu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_tumu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_mu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_mu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_mu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_mu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_mu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_mu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_mu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_mu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_mu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
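+// _rm_tu tests: explicit rounding mode with tail-undisturbed policy. Passing
+// __RISCV_FRM_RNE lowers to frm operand 0 on the unmasked intrinsic, with vd
+// supplying the tail-undisturbed passthru.
+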
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tu(vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tu(vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tu(vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_tu(vint8m1_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_tu(vint8m2_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_tu(vint8m4_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tu(vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tu(vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tu(vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tu(vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tu(vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tu(vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_tu(vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_tu(vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_tu(vbfloat16m1_t vd, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_tu(vbfloat16m2_t vd, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_tu(vbfloat16m4_t vd, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
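+// _rm_tum tests: explicit rounding mode (frm operand 0), tail undisturbed,
+// mask agnostic (trailing policy operand 2 on the masked intrinsic).
+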
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tum(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tum(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tum(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_tum(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_tum(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_tum(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tum(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tum(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tum(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tum(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tum(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tum(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
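+// _rm_tumu tests: explicit rounding mode (frm operand 0), tail undisturbed,
+// mask undisturbed (trailing policy operand 0 on the masked intrinsic).
+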
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tumu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tumu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tumu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_tumu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_tumu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_tumu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tumu(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tumu(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tumu(vbool16_t vm,
+ vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tumu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tumu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tumu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_tumu(vbool64_t vm,
+ vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_tumu(vbool32_t vm,
+ vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
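+// _rm_mu tests: explicit rounding mode (frm operand 0), tail agnostic,
+// mask undisturbed (trailing policy operand 1 on the masked intrinsic).
+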
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_mu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_mu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_mu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_mu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_mu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_mu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_mu(vbool64_t vm, vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_mu(vbool32_t vm, vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_mu(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_mu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_mu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_mu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rod.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rod.c
new file mode 100644
index 0000000..0745633
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rod.c
@@ -0,0 +1,233 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_tu(vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_tu(vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_tu(vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_tu(vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_tu(vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_tum(vbool64_t vm,
+ vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_tum(vbool32_t vm,
+ vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_tumu(vbool64_t vm,
+ vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_tumu(vbool32_t vm,
+ vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_mu(vm, vd, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rtz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rtz.c
new file mode 100644
index 0000000..b906c5f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rtz.c
@@ -0,0 +1,572 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tu(vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tu(vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tu(vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tu(vint8m1_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tu(vint8m2_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tu(vint8m4_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tu(vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tu(vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tu(vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tu(vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tu(vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tu(vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tum(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tum(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tum(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tum(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tum(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tum(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tum(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tum(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tum(vbool16_t vm,
+ vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tum(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tum(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tum(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tumu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tumu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tumu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tumu(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tumu(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tumu(vbool16_t vm,
+ vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tumu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tumu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tumu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_mu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_mu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_mu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_mu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_mu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_mu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_mu(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_mu(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_mu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_mu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_mu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmacc.c
new file mode 100644
index 0000000..cc487b4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmacc.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmadd.c
new file mode 100644
index 0000000..f9c348b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmadd.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
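+
+// Same layout as vfnmacc.c above; the only difference is the operation:
+// per the RVV spec, vfnmadd computes vd = -(vd * vs1) - vs2, overwriting
+// the multiplicand, while vfnmacc computes vd = -(vs1 * vs2) - vd,
+// overwriting the addend. The rounding-mode and policy operands follow the
+// same encoding.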
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsac.c
new file mode 100644
index 0000000..83d35e8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsac.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
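+
+// Note on the checks below: the _tu/_tum/_tumu/_mu suffixes select the RVV
+// tail/mask policy variants (tail undisturbed; tail undisturbed, masked;
+// tail undisturbed, mask undisturbed; mask undisturbed). In the expected IR,
+// the `i64 7` operand is the dynamic rounding mode (FRM_DYN) and the trailing
+// i64 operand encodes the policy (TAIL_AGNOSTIC = 1, MASK_AGNOSTIC = 2).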
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsub.c
new file mode 100644
index 0000000..f5282a1
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsub.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrec7.c
new file mode 100644
index 0000000..f8e5a33
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrec7.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsqrt7.c
new file mode 100644
index 0000000..7c6c926
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsqrt7.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
+}
+
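Note: the vfrsqrt7 file above shows the full policy scheme for a unary estimate op. The _tu overloads lower to the unmasked intrinsic with the maskedoff operand carried through, while _tum, _tumu, and _mu lower to the masked intrinsic with trailing policy immediates of 2, 0, and 1 respectively. Unlike vfrec7, these IR calls carry no frm operand, presumably because the 7-bit reciprocal square-root estimate is not rounding-mode dependent. A minimal sketch (hypothetical helper name, same setup as the tests):

    // Tail-undisturbed estimate: elements past vl keep their values
    // from dest; the rest receive an ~7-bit estimate of 1/sqrt(x).
    static vbfloat16mf2_t demo_vfrsqrt7_tu(vbfloat16mf2_t dest,
                                           vbfloat16mf2_t x, size_t vl) {
      return __riscv_vfrsqrt7_tu(dest, x, vl);
    }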
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsub.c
new file mode 100644
index 0000000..c09caeb
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsub.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
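Note: vfrsub is the reverse form of scalar subtraction, computing op2 - op1[i] per element (a __bf16 scalar minus a bf16 vector), which is why only _vf overloads appear in the file above; like vfrec7, its IR calls pass frm as i64 7 (dynamic rounding) ahead of vl. A minimal sketch (hypothetical helper name, same setup as the tests):

    // Tail-undisturbed reverse subtraction: dest[i] = s - x[i] for
    // i < vl; elements past vl keep their values from dest.
    static vbfloat16m1_t demo_vfrsub_tu(vbfloat16m1_t dest, vbfloat16m1_t x,
                                        __bf16 s, size_t vl) {
      return __riscv_vfrsub_tu(dest, x, s, vl);
    }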
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnj.c
new file mode 100644
index 0000000..c1f69932
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnj.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjn.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjn.c
new file mode 100644
index 0000000..1b799d8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjn.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjx.c
new file mode 100644
index 0000000..9c5f2af
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjx.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1down.c
new file mode 100644
index 0000000..691302e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1down.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1up.c
new file mode 100644
index 0000000..1238d22
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1up.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
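+// Each LMUL is exercised under the four tail/mask policy suffixes. The
+// unmasked _tu form lowers to the plain @llvm.riscv.vfslide1up intrinsic;
+// the masked _tum/_tumu/_mu forms lower to the .mask intrinsic with a
+// trailing policy operand (bit 0 set = tail agnostic, bit 1 set = mask
+// agnostic).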
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
+}
+
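+// _tum: tail undisturbed, mask agnostic (trailing policy operand 2).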
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+}
+
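+// _tumu: tail undisturbed, mask undisturbed (trailing policy operand 0).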
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+}
+
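+// _mu: tail agnostic, mask undisturbed (trailing policy operand 1).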
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsub.c
new file mode 100644
index 0000000..ea4f8f0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsub.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
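+// vfsub carries an explicit rounding-mode operand in addition to the tail/
+// mask policy operand. These overloaded forms take no frm argument, so the
+// frontend passes 7 (dynamic rounding mode, i.e. use the current frm CSR),
+// visible as `i64 7` in every intrinsic call below.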
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwadd.c
new file mode 100644
index 0000000..e5b7b8d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwadd.c
@@ -0,0 +1,1932 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwcvt.c
new file mode 100644
index 0000000..7300104
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwcvt.c
@@ -0,0 +1,765 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_tu(vbfloat16mf4_t vd, vint8mf8_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_tu(vbfloat16mf2_t vd, vint8mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_tu(vbfloat16m1_t vd, vint8mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_tu(vbfloat16m2_t vd, vint8m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_tu(vbfloat16m4_t vd, vint8m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_tu(vbfloat16m8_t vd, vint8m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_tu(vbfloat16mf4_t vd, vuint8mf8_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_tu(vbfloat16mf2_t vd, vuint8mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_tu(vbfloat16m1_t vd, vuint8mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_tu(vbfloat16m2_t vd, vuint8m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_tu(vbfloat16m4_t vd, vuint8m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_tu(vbfloat16m8_t vd, vuint8m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ vint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ vint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd,
+ vint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vuint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vuint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd,
+ vuint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd,
+ vint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd,
+ vint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd,
+ vint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd,
+ vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd,
+ vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vuint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vuint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd,
+ vuint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd,
+ vint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vuint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vuint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd,
+ vuint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmacc.c
new file mode 100644
index 0000000..b05f8802
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmacc.c
@@ -0,0 +1,977 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
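+// Note: in the IR checked below, the trailing i64 operands of each
+// llvm.riscv.vfwmacc intrinsic are the rounding mode (7 encodes the dynamic
+// mode, i.e. use the frm CSR; the _rm_ tests pass __RISCV_FRM_RNE = 0
+// explicitly), the vector length, and the tail/mask policy, where bit 0 set
+// means tail agnostic and bit 1 set means mask agnostic: hence _tu and _tum
+// lower with policy 2, _tumu with 0, and _mu with 1.
+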
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_tum(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_tumu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmsac.c
new file mode 100644
index 0000000..93721f6
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmsac.c
@@ -0,0 +1,977 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
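+
+// Note: __riscv_vfwmsac is the widening multiply-subtract, vd = (vs1 * vs2) - vd,
+// with the bf16 product widened to f32 (per the RVV spec; this comment is an
+// editorial summary, not generated by the checks). The file covers the
+// overloaded policy variants (_tu, _tum, _tumu, _mu) and their explicit
+// rounding-mode (_rm) forms; in the emitted IR the trailing immediates are the
+// rounding mode (7 = dynamic frm, 0 = __RISCV_FRM_RNE) followed by the
+// tail/mask policy.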
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_tum(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_tumu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmul.c
new file mode 100644
index 0000000..4a2b5e3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmul.c
@@ -0,0 +1,975 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl);
+}
+
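+// Note (annotation, not autogenerated): the _rm variants below pass an
+// explicit rounding mode; __RISCV_FRM_RNE lowers to an frm operand of i64 0
+// in the intrinsic call, whereas the variants above pass i64 7 (FRM_DYN,
+// i.e. use the dynamic rounding mode from the frm CSR).
+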
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmacc.c
new file mode 100644
index 0000000..57e43344
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmacc.c
@@ -0,0 +1,994 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
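+
+// Note (annotation, not autogenerated): each test below checks that an
+// overloaded policy intrinsic such as __riscv_vfwnmacc_tu lowers to the
+// corresponding llvm.riscv.vfwnmacc intrinsic with the expected frm and
+// policy operands. The _vv forms take two bfloat16 vector sources and the
+// _vf forms a scalar __bf16 multiplier; both widen into the f32 accumulator
+// vd. The opt -passes=mem2reg step in the RUN line cleans up the -O0 allocas
+// so a single intrinsic call remains for FileCheck to match.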
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tum(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tumu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm,
+ vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm,
+ vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_mu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmsac.c
new file mode 100644
index 0000000..42da060
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmsac.c
@@ -0,0 +1,994 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
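+// Conventions in this file, as reflected in the autogenerated checks below
+// (a reader's summary, not part of the generated assertions): the _tu, _tum,
+// _tumu, and _mu suffixes select the tail/mask policy and appear as the
+// trailing immediate of each intrinsic call (tu/tum -> i64 2, tumu -> i64 0,
+// mu -> i64 1). Variants without an explicit rounding-mode argument pass
+// i64 7 (dynamic frm); the _rm variants forward the __RISCV_FRM_* argument,
+// so __RISCV_FRM_RNE lowers to i64 0.
+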
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tum(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tumu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm,
+ vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm,
+ vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_mu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwsub.c
new file mode 100644
index 0000000..1378bc9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwsub.c
@@ -0,0 +1,1932 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfeq.c
new file mode 100644
index 0000000..3945f82
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfeq.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfge.c
new file mode 100644
index 0000000..82586da
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfge.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfgt.c
new file mode 100644
index 0000000..75ccbbc
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfgt.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfle.c
new file mode 100644
index 0000000..49ff1c9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfle.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmflt.c
new file mode 100644
index 0000000..24b3f9c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmflt.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfne.c
new file mode 100644
index 0000000..ca3e134
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfne.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
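All of the `_mu` (mask-undisturbed) overloads above share one element-wise contract: for i < vl, an active lane (mask bit set) receives the fresh comparison result, while an inactive lane keeps the corresponding bit of `maskedoff`, which is exactly what the `@llvm.riscv.vmf*.mask` CHECK lines encode by passing `maskedoff` as the first operand. A minimal scalar sketch of that contract, with illustrative names, one plain bool per lane instead of real mask registers, and float standing in for __bf16:

// Hypothetical scalar model of one lane of __riscv_vmfne_mu(); the real
// intrinsic operates on whole vbool/vbfloat16 registers under vl.
static inline _Bool vmfne_mu_lane(_Bool mask_i, _Bool maskedoff_i,
                                  float op1_i, float op2_i) {
  return mask_i ? (op1_i != op2_i)  // active lane: fresh compare result
                : maskedoff_i;      // inactive lane: old mask bit survives
}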
diff --git a/clang/test/CodeGen/X86/avx10_2bf16-builtins.c b/clang/test/CodeGen/X86/avx10_2bf16-builtins.c
index c7fea07..f8a4c51 100644
--- a/clang/test/CodeGen/X86/avx10_2bf16-builtins.c
+++ b/clang/test/CodeGen/X86/avx10_2bf16-builtins.c
@@ -274,7 +274,7 @@ __m256bh test_mm256_loadu_pbh(void *p) {
__m128bh test_mm_load_sbh(void const *A) {
// CHECK-LABEL: test_mm_load_sbh
- // CHECK: %{{.*}} = call <8 x bfloat> @llvm.masked.load.v8bf16.p0(ptr %{{.*}}, i32 1, <8 x i1> bitcast (<1 x i8> splat (i8 1) to <8 x i1>), <8 x bfloat> %{{.*}})
+ // CHECK: %{{.*}} = call <8 x bfloat> @llvm.masked.load.v8bf16.p0(ptr align 1 %{{.*}}, <8 x i1> bitcast (<1 x i8> splat (i8 1) to <8 x i1>), <8 x bfloat> %{{.*}})
return _mm_load_sbh(A);
}
@@ -305,7 +305,7 @@ void test_mm_store_sbh(void *A, __m128bh B) {
void test_mm_mask_store_sbh(void *__P, __mmask8 __U, __m128bh __A) {
// CHECK-LABEL: @test_mm_mask_store_sbh
- // CHECK: call void @llvm.masked.store.v8bf16.p0(<8 x bfloat> %{{.*}}, ptr %{{.*}}, i32 1, <8 x i1> %{{.*}})
+ // CHECK: call void @llvm.masked.store.v8bf16.p0(<8 x bfloat> %{{.*}}, ptr align 1 %{{.*}}, <8 x i1> %{{.*}})
_mm_mask_store_sbh(__P, __U, __A);
}
@@ -323,13 +323,13 @@ void test_mm_store_pbh(void *p, __m128bh a) {
__m128bh test_mm_mask_load_sbh(__m128bh __A, __mmask8 __U, const void *__W) {
// CHECK-LABEL: @test_mm_mask_load_sbh
- // CHECK: %{{.*}} = call <8 x bfloat> @llvm.masked.load.v8bf16.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x bfloat> %{{.*}})
+ // CHECK: %{{.*}} = call <8 x bfloat> @llvm.masked.load.v8bf16.p0(ptr align 1 %{{.*}}, <8 x i1> %{{.*}}, <8 x bfloat> %{{.*}})
return _mm_mask_load_sbh(__A, __U, __W);
}
__m128bh test_mm_maskz_load_sbh(__mmask8 __U, const void *__W) {
// CHECK-LABEL: @test_mm_maskz_load_sbh
- // CHECK: %{{.*}} = call <8 x bfloat> @llvm.masked.load.v8bf16.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x bfloat> %{{.*}})
+ // CHECK: %{{.*}} = call <8 x bfloat> @llvm.masked.load.v8bf16.p0(ptr align 1 %{{.*}}, <8 x i1> %{{.*}}, <8 x bfloat> %{{.*}})
return _mm_maskz_load_sbh(__U, __W);
}
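Every CHECK-line update in this file, and in the x86 files below, follows the same pattern: the alignment of @llvm.masked.load / @llvm.masked.store moves from a separate i32 argument into an `align` attribute on the pointer operand; which lanes are touched is unchanged. For reference, a scalar C sketch of the masked-load semantics these tests rely on (illustrative names; inactive lanes are never dereferenced):

// Hypothetical scalar model of an 8-lane masked load such as the one
// behind _mm512_mask_loadu_epi16, shown here for 16-bit elements.
static inline void masked_load8_i16(short dst[8], const short *src,
                                    unsigned char k, const short pass[8]) {
  for (int i = 0; i < 8; ++i)
    dst[i] = ((k >> i) & 1) ? src[i]    // active lane: load from memory
                            : pass[i];  // inactive lane: keep passthrough
}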
diff --git a/clang/test/CodeGen/X86/avx2-builtins.c b/clang/test/CodeGen/X86/avx2-builtins.c
index b798618..a505d70 100644
--- a/clang/test/CodeGen/X86/avx2-builtins.c
+++ b/clang/test/CodeGen/X86/avx2-builtins.c
@@ -1038,6 +1038,7 @@ __m256i test_mm256_mulhrs_epi16(__m256i a, __m256i b) {
// CHECK: call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> %{{.*}}, <16 x i16> %{{.*}})
return _mm256_mulhrs_epi16(a, b);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_mulhrs_epi16((__m256i)(__v16hi){+100, +200, -300, -400, +500, +600, -700, +800, -900, -1000, +1100, +1200, -1300, -1400, +1500, +1600}, (__m256i)(__v16hi){+1600, -1500, +1400, -1300, +1200, -1100, +1000, -900, +800, -700, +600, -500, +400, -300, +200, -100}), +5, -9, -13, +16, +18, -20, -21, -22, -22, +21, +20, -18, -16, +13, +9, -5));
__m256i test_mm256_mullo_epi16(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_mullo_epi16
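The TEST_CONSTEXPR vectors added here and in the AVX-512 files below follow directly from the documented pmulhrsw behaviour: widen to 32 bits, multiply, arithmetic-shift right by 14, add a rounding 1, then shift right once more. A small reference model (a sketch assuming arithmetic right shift of negative values, not code taken from the headers):

// E.g. mulhrs16(100, 1600) == 5 and mulhrs16(-300, 1400) == -13,
// matching the first constants checked above.
static inline short mulhrs16(short a, short b) {
  int t = ((int)a * (int)b) >> 14; // keep the 18 high bits of the product
  return (short)((t + 1) >> 1);    // round to nearest, drop the low bit
}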
diff --git a/clang/test/CodeGen/X86/avx512bw-builtins.c b/clang/test/CodeGen/X86/avx512bw-builtins.c
index fddf17d..d07e40a 100644
--- a/clang/test/CodeGen/X86/avx512bw-builtins.c
+++ b/clang/test/CodeGen/X86/avx512bw-builtins.c
@@ -1596,18 +1596,24 @@ __m512i test_mm512_mulhrs_epi16(__m512i __A, __m512i __B) {
// CHECK: @llvm.x86.avx512.pmul.hr.sw.512
return _mm512_mulhrs_epi16(__A,__B);
}
+TEST_CONSTEXPR(match_v32hi(_mm512_mulhrs_epi16((__m512i)(__v32hi){+100, +200, -300, -400, +500, +600, -700, +800, -900, -1000, +1100, +1200, -1300, -1400, +1500, +1600, -1700, -1800, +1900, +2000, -2100, -2200, +2300, +2400, -2500, -2600, +2700, +2800, -2900, -3000, +3100, +3200}, (__m512i)(__v32hi){+3200, -3100, +3000, -2900, +2800, -2700, +2600, -2500, +2400, -2300, +2200, -2100, +2000, -1900, +1800, -1700, +1600, -1500, +1400, -1300, +1200, -1100, +1000, -900, +800, -700, +600, -500, +400, -300, +200, -100}), +10, -19, -27, +35, +43, -49, -56, -61, -66, +70, +74, -77, -79, +81, +82, -83, -83, +82, +81, -79, -77, +74, +70, -66, -61, +56, +49, -43, -35, +27, +19, -10));
+
__m512i test_mm512_mask_mulhrs_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
// CHECK-LABEL: test_mm512_mask_mulhrs_epi16
// CHECK: @llvm.x86.avx512.pmul.hr.sw.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_mulhrs_epi16(__W,__U,__A,__B);
}
+TEST_CONSTEXPR(match_v32hi(_mm512_mask_mulhrs_epi16(_mm512_set1_epi16(1), 0x0000FFFF, (__m512i)(__v32hi){+100, +200, -300, -400, +500, +600, -700, +800, -900, -1000, +1100, +1200, -1300, -1400, +1500, +1600, -1700, -1800, +1900, +2000, -2100, -2200, +2300, +2400, -2500, -2600, +2700, +2800, -2900, -3000, +3100, +3200}, (__m512i)(__v32hi){+3200, -3100, +3000, -2900, +2800, -2700, +2600, -2500, +2400, -2300, +2200, -2100, +2000, -1900, +1800, -1700, +1600, -1500, +1400, -1300, +1200, -1100, +1000, -900, +800, -700, +600, -500, +400, -300, +200, -100}), +10, -19, -27, +35, +43, -49, -56, -61, -66, +70, +74, -77, -79, +81, +82, -83, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1));
+
__m512i test_mm512_maskz_mulhrs_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
// CHECK-LABEL: test_mm512_maskz_mulhrs_epi16
// CHECK: @llvm.x86.avx512.pmul.hr.sw.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_mulhrs_epi16(__U,__A,__B);
}
+TEST_CONSTEXPR(match_v32hi(_mm512_maskz_mulhrs_epi16(0x0000FFFF, (__m512i)(__v32hi){+100, +200, -300, -400, +500, +600, -700, +800, -900, -1000, +1100, +1200, -1300, -1400, +1500, +1600, -1700, -1800, +1900, +2000, -2100, -2200, +2300, +2400, -2500, -2600, +2700, +2800, -2900, -3000, +3100, +3200}, (__m512i)(__v32hi){+3200, -3100, +3000, -2900, +2800, -2700, +2600, -2500, +2400, -2300, +2200, -2100, +2000, -1900, +1800, -1700, +1600, -1500, +1400, -1300, +1200, -1100, +1000, -900, +800, -700, +600, -500, +400, -300, +200, -100}), +10, -19, -27, +35, +43, -49, -56, -61, -66, +70, +74, -77, -79, +81, +82, -83, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
+
__m512i test_mm512_mulhi_epi16(__m512i __A, __m512i __B) {
// CHECK-LABEL: test_mm512_mulhi_epi16
// CHECK: @llvm.x86.avx512.pmulh.w.512
@@ -2408,13 +2414,13 @@ __m512i test_mm512_loadu_epi16 (void *__P)
__m512i test_mm512_mask_loadu_epi16(__m512i __W, __mmask32 __U, void const *__P) {
// CHECK-LABEL: test_mm512_mask_loadu_epi16
- // CHECK: @llvm.masked.load.v32i16.p0(ptr %{{.*}}, i32 1, <32 x i1> %{{.*}}, <32 x i16> %{{.*}})
+ // CHECK: @llvm.masked.load.v32i16.p0(ptr align 1 %{{.*}}, <32 x i1> %{{.*}}, <32 x i16> %{{.*}})
return _mm512_mask_loadu_epi16(__W, __U, __P);
}
__m512i test_mm512_maskz_loadu_epi16(__mmask32 __U, void const *__P) {
// CHECK-LABEL: test_mm512_maskz_loadu_epi16
- // CHECK: @llvm.masked.load.v32i16.p0(ptr %{{.*}}, i32 1, <32 x i1> %{{.*}}, <32 x i16> %{{.*}})
+ // CHECK: @llvm.masked.load.v32i16.p0(ptr align 1 %{{.*}}, <32 x i1> %{{.*}}, <32 x i16> %{{.*}})
return _mm512_maskz_loadu_epi16(__U, __P);
}
@@ -2427,13 +2433,13 @@ __m512i test_mm512_loadu_epi8 (void *__P)
__m512i test_mm512_mask_loadu_epi8(__m512i __W, __mmask64 __U, void const *__P) {
// CHECK-LABEL: test_mm512_mask_loadu_epi8
- // CHECK: @llvm.masked.load.v64i8.p0(ptr %{{.*}}, i32 1, <64 x i1> %{{.*}}, <64 x i8> %{{.*}})
+ // CHECK: @llvm.masked.load.v64i8.p0(ptr align 1 %{{.*}}, <64 x i1> %{{.*}}, <64 x i8> %{{.*}})
return _mm512_mask_loadu_epi8(__W, __U, __P);
}
__m512i test_mm512_maskz_loadu_epi8(__mmask64 __U, void const *__P) {
// CHECK-LABEL: test_mm512_maskz_loadu_epi8
- // CHECK: @llvm.masked.load.v64i8.p0(ptr %{{.*}}, i32 1, <64 x i1> %{{.*}}, <64 x i8> %{{.*}})
+ // CHECK: @llvm.masked.load.v64i8.p0(ptr align 1 %{{.*}}, <64 x i1> %{{.*}}, <64 x i8> %{{.*}})
return _mm512_maskz_loadu_epi8(__U, __P);
}
@@ -2445,7 +2451,7 @@ void test_mm512_storeu_epi16(void *__P, __m512i __A) {
void test_mm512_mask_storeu_epi16(void *__P, __mmask32 __U, __m512i __A) {
// CHECK-LABEL: test_mm512_mask_storeu_epi16
- // CHECK: @llvm.masked.store.v32i16.p0(<32 x i16> %{{.*}}, ptr %{{.*}}, i32 1, <32 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v32i16.p0(<32 x i16> %{{.*}}, ptr align 1 %{{.*}}, <32 x i1> %{{.*}})
return _mm512_mask_storeu_epi16(__P, __U, __A);
}
@@ -2464,7 +2470,7 @@ void test_mm512_storeu_epi8(void *__P, __m512i __A) {
void test_mm512_mask_storeu_epi8(void *__P, __mmask64 __U, __m512i __A) {
// CHECK-LABEL: test_mm512_mask_storeu_epi8
- // CHECK: @llvm.masked.store.v64i8.p0(<64 x i8> %{{.*}}, ptr %{{.*}}, i32 1, <64 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v64i8.p0(<64 x i8> %{{.*}}, ptr align 1 %{{.*}}, <64 x i1> %{{.*}})
return _mm512_mask_storeu_epi8(__P, __U, __A);
}
__mmask64 test_mm512_mask_test_epi8_mask(__mmask64 __U, __m512i __A, __m512i __B) {
diff --git a/clang/test/CodeGen/X86/avx512f-builtins.c b/clang/test/CodeGen/X86/avx512f-builtins.c
index 3deaf8e..6073fdf 100644
--- a/clang/test/CodeGen/X86/avx512f-builtins.c
+++ b/clang/test/CodeGen/X86/avx512f-builtins.c
@@ -212,7 +212,7 @@ void test_mm512_storeu_pd(void *p, __m512d a)
void test_mm512_mask_store_ps(void *p, __m512 a, __mmask16 m)
{
// CHECK-LABEL: test_mm512_mask_store_ps
- // CHECK: @llvm.masked.store.v16f32.p0(<16 x float> %{{.*}}, ptr %{{.*}}, i32 64, <16 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v16f32.p0(<16 x float> %{{.*}}, ptr align 64 %{{.*}}, <16 x i1> %{{.*}})
_mm512_mask_store_ps(p, m, a);
}
@@ -260,7 +260,7 @@ void test_mm512_store_pd(void *p, __m512d a)
void test_mm512_mask_store_pd(void *p, __m512d a, __mmask8 m)
{
// CHECK-LABEL: test_mm512_mask_store_pd
- // CHECK: @llvm.masked.store.v8f64.p0(<8 x double> %{{.*}}, ptr %{{.*}}, i32 64, <8 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v8f64.p0(<8 x double> %{{.*}}, ptr align 64 %{{.*}}, <8 x i1> %{{.*}})
_mm512_mask_store_pd(p, m, a);
}
@@ -272,7 +272,7 @@ void test_mm512_storeu_epi32(void *__P, __m512i __A) {
void test_mm512_mask_storeu_epi32(void *__P, __mmask16 __U, __m512i __A) {
// CHECK-LABEL: test_mm512_mask_storeu_epi32
- // CHECK: @llvm.masked.store.v16i32.p0(<16 x i32> %{{.*}}, ptr %{{.*}}, i32 1, <16 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v16i32.p0(<16 x i32> %{{.*}}, ptr align 1 %{{.*}}, <16 x i1> %{{.*}})
return _mm512_mask_storeu_epi32(__P, __U, __A);
}
@@ -284,7 +284,7 @@ void test_mm512_storeu_epi64(void *__P, __m512i __A) {
void test_mm512_mask_storeu_epi64(void *__P, __mmask8 __U, __m512i __A) {
// CHECK-LABEL: test_mm512_mask_storeu_epi64
- // CHECK: @llvm.masked.store.v8i64.p0(<8 x i64> %{{.*}}, ptr %{{.*}}, i32 1, <8 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v8i64.p0(<8 x i64> %{{.*}}, ptr align 1 %{{.*}}, <8 x i1> %{{.*}})
return _mm512_mask_storeu_epi64(__P, __U, __A);
}
@@ -305,14 +305,14 @@ __m512i test_mm512_loadu_epi32 (void *__P)
__m512i test_mm512_mask_loadu_epi32 (__m512i __W, __mmask16 __U, void *__P)
{
// CHECK-LABEL: test_mm512_mask_loadu_epi32
- // CHECK: @llvm.masked.load.v16i32.p0(ptr %{{.*}}, i32 1, <16 x i1> %{{.*}}, <16 x i32> %{{.*}})
+ // CHECK: @llvm.masked.load.v16i32.p0(ptr align 1 %{{.*}}, <16 x i1> %{{.*}}, <16 x i32> %{{.*}})
return _mm512_mask_loadu_epi32 (__W,__U, __P);
}
__m512i test_mm512_maskz_loadu_epi32 (__mmask16 __U, void *__P)
{
// CHECK-LABEL: test_mm512_maskz_loadu_epi32
- // CHECK: @llvm.masked.load.v16i32.p0(ptr %{{.*}}, i32 1, <16 x i1> %{{.*}}, <16 x i32> %{{.*}})
+ // CHECK: @llvm.masked.load.v16i32.p0(ptr align 1 %{{.*}}, <16 x i1> %{{.*}}, <16 x i32> %{{.*}})
return _mm512_maskz_loadu_epi32 (__U, __P);
}
@@ -326,14 +326,14 @@ __m512i test_mm512_loadu_epi64 (void *__P)
__m512i test_mm512_mask_loadu_epi64 (__m512i __W, __mmask8 __U, void *__P)
{
// CHECK-LABEL: test_mm512_mask_loadu_epi64
- // CHECK: @llvm.masked.load.v8i64.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x i64> %{{.*}})
+ // CHECK: @llvm.masked.load.v8i64.p0(ptr align 1 %{{.*}}, <8 x i1> %{{.*}}, <8 x i64> %{{.*}})
return _mm512_mask_loadu_epi64 (__W,__U, __P);
}
__m512i test_mm512_maskz_loadu_epi64 (__mmask16 __U, void *__P)
{
// CHECK-LABEL: test_mm512_maskz_loadu_epi64
- // CHECK: @llvm.masked.load.v8i64.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x i64> %{{.*}})
+ // CHECK: @llvm.masked.load.v8i64.p0(ptr align 1 %{{.*}}, <8 x i1> %{{.*}}, <8 x i64> %{{.*}})
return _mm512_maskz_loadu_epi64 (__U, __P);
}
@@ -347,7 +347,7 @@ __m512 test_mm512_loadu_ps(void *p)
__m512 test_mm512_mask_loadu_ps (__m512 __W, __mmask16 __U, void *__P)
{
// CHECK-LABEL: test_mm512_mask_loadu_ps
- // CHECK: @llvm.masked.load.v16f32.p0(ptr %{{.*}}, i32 1, <16 x i1> %{{.*}}, <16 x float> %{{.*}})
+ // CHECK: @llvm.masked.load.v16f32.p0(ptr align 1 %{{.*}}, <16 x i1> %{{.*}}, <16 x float> %{{.*}})
return _mm512_mask_loadu_ps (__W,__U, __P);
}
@@ -361,7 +361,7 @@ __m512d test_mm512_loadu_pd(void *p)
__m512d test_mm512_mask_loadu_pd (__m512d __W, __mmask8 __U, void *__P)
{
// CHECK-LABEL: test_mm512_mask_loadu_pd
- // CHECK: @llvm.masked.load.v8f64.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x double> %{{.*}})
+ // CHECK: @llvm.masked.load.v8f64.p0(ptr align 1 %{{.*}}, <8 x i1> %{{.*}}, <8 x double> %{{.*}})
return _mm512_mask_loadu_pd (__W,__U, __P);
}
@@ -399,14 +399,14 @@ __m512 test_mm512_load_ps(void *p)
__m512 test_mm512_mask_load_ps (__m512 __W, __mmask16 __U, void *__P)
{
// CHECK-LABEL: test_mm512_mask_load_ps
- // CHECK: @llvm.masked.load.v16f32.p0(ptr %{{.*}}, i32 64, <16 x i1> %{{.*}}, <16 x float> %{{.*}})
+ // CHECK: @llvm.masked.load.v16f32.p0(ptr align 64 %{{.*}}, <16 x i1> %{{.*}}, <16 x float> %{{.*}})
return _mm512_mask_load_ps (__W,__U, __P);
}
__m512 test_mm512_maskz_load_ps(__mmask16 __U, void *__P)
{
// CHECK-LABEL: test_mm512_maskz_load_ps
- // CHECK: @llvm.masked.load.v16f32.p0(ptr %{{.*}}, i32 64, <16 x i1> %{{.*}}, <16 x float> %{{.*}})
+ // CHECK: @llvm.masked.load.v16f32.p0(ptr align 64 %{{.*}}, <16 x i1> %{{.*}}, <16 x float> %{{.*}})
return _mm512_maskz_load_ps(__U, __P);
}
@@ -420,14 +420,14 @@ __m512d test_mm512_load_pd(void *p)
__m512d test_mm512_mask_load_pd (__m512d __W, __mmask8 __U, void *__P)
{
// CHECK-LABEL: test_mm512_mask_load_pd
- // CHECK: @llvm.masked.load.v8f64.p0(ptr %{{.*}}, i32 64, <8 x i1> %{{.*}}, <8 x double> %{{.*}})
+ // CHECK: @llvm.masked.load.v8f64.p0(ptr align 64 %{{.*}}, <8 x i1> %{{.*}}, <8 x double> %{{.*}})
return _mm512_mask_load_pd (__W,__U, __P);
}
__m512d test_mm512_maskz_load_pd(__mmask8 __U, void *__P)
{
// CHECK-LABEL: test_mm512_maskz_load_pd
- // CHECK: @llvm.masked.load.v8f64.p0(ptr %{{.*}}, i32 64, <8 x i1> %{{.*}}, <8 x double> %{{.*}})
+ // CHECK: @llvm.masked.load.v8f64.p0(ptr align 64 %{{.*}}, <8 x i1> %{{.*}}, <8 x double> %{{.*}})
return _mm512_maskz_load_pd(__U, __P);
}
@@ -4560,13 +4560,13 @@ __m512i test_mm512_maskz_srli_epi64_2(__mmask8 __U, __m512i __A, unsigned int __
__m512i test_mm512_mask_load_epi32(__m512i __W, __mmask16 __U, void const *__P) {
// CHECK-LABEL: test_mm512_mask_load_epi32
- // CHECK: @llvm.masked.load.v16i32.p0(ptr %{{.*}}, i32 64, <16 x i1> %{{.*}}, <16 x i32> %{{.*}})
+ // CHECK: @llvm.masked.load.v16i32.p0(ptr align 64 %{{.*}}, <16 x i1> %{{.*}}, <16 x i32> %{{.*}})
return _mm512_mask_load_epi32(__W, __U, __P);
}
__m512i test_mm512_maskz_load_epi32(__mmask16 __U, void const *__P) {
// CHECK-LABEL: test_mm512_maskz_load_epi32
- // CHECK: @llvm.masked.load.v16i32.p0(ptr %{{.*}}, i32 64, <16 x i1> %{{.*}}, <16 x i32> %{{.*}})
+ // CHECK: @llvm.masked.load.v16i32.p0(ptr align 64 %{{.*}}, <16 x i1> %{{.*}}, <16 x i32> %{{.*}})
return _mm512_maskz_load_epi32(__U, __P);
}
@@ -4596,25 +4596,25 @@ __m512i test_mm512_maskz_mov_epi64(__mmask8 __U, __m512i __A) {
__m512i test_mm512_mask_load_epi64(__m512i __W, __mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm512_mask_load_epi64
- // CHECK: @llvm.masked.load.v8i64.p0(ptr %{{.*}}, i32 64, <8 x i1> %{{.*}}, <8 x i64> %{{.*}})
+ // CHECK: @llvm.masked.load.v8i64.p0(ptr align 64 %{{.*}}, <8 x i1> %{{.*}}, <8 x i64> %{{.*}})
return _mm512_mask_load_epi64(__W, __U, __P);
}
__m512i test_mm512_maskz_load_epi64(__mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm512_maskz_load_epi64
- // CHECK: @llvm.masked.load.v8i64.p0(ptr %{{.*}}, i32 64, <8 x i1> %{{.*}}, <8 x i64> %{{.*}})
+ // CHECK: @llvm.masked.load.v8i64.p0(ptr align 64 %{{.*}}, <8 x i1> %{{.*}}, <8 x i64> %{{.*}})
return _mm512_maskz_load_epi64(__U, __P);
}
void test_mm512_mask_store_epi32(void *__P, __mmask16 __U, __m512i __A) {
// CHECK-LABEL: test_mm512_mask_store_epi32
- // CHECK: @llvm.masked.store.v16i32.p0(<16 x i32> %{{.*}}, ptr %{{.*}}, i32 64, <16 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v16i32.p0(<16 x i32> %{{.*}}, ptr align 64 %{{.*}}, <16 x i1> %{{.*}})
return _mm512_mask_store_epi32(__P, __U, __A);
}
void test_mm512_mask_store_epi64(void *__P, __mmask8 __U, __m512i __A) {
// CHECK-LABEL: test_mm512_mask_store_epi64
- // CHECK: @llvm.masked.store.v8i64.p0(<8 x i64> %{{.*}}, ptr %{{.*}}, i32 64, <8 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v8i64.p0(<8 x i64> %{{.*}}, ptr align 64 %{{.*}}, <8 x i1> %{{.*}})
return _mm512_mask_store_epi64(__P, __U, __A);
}
@@ -11572,42 +11572,42 @@ __m128d test_mm_maskz_move_sd (__mmask8 __U, __m128d __A, __m128d __B)
void test_mm_mask_store_ss(float * __P, __mmask8 __U, __m128 __A)
{
// CHECK-LABEL: test_mm_mask_store_ss
- // CHECK: call void @llvm.masked.store.v4f32.p0(<4 x float> %{{.*}}, ptr %{{.*}}, i32 1, <4 x i1> %{{.*}})
+ // CHECK: call void @llvm.masked.store.v4f32.p0(<4 x float> %{{.*}}, ptr align 1 %{{.*}}, <4 x i1> %{{.*}})
_mm_mask_store_ss(__P, __U, __A);
}
void test_mm_mask_store_sd(double * __P, __mmask8 __U, __m128d __A)
{
// CHECK-LABEL: test_mm_mask_store_sd
- // CHECK: call void @llvm.masked.store.v2f64.p0(<2 x double> %{{.*}}, ptr %{{.*}}, i32 1, <2 x i1> %{{.*}})
+ // CHECK: call void @llvm.masked.store.v2f64.p0(<2 x double> %{{.*}}, ptr align 1 %{{.*}}, <2 x i1> %{{.*}})
_mm_mask_store_sd(__P, __U, __A);
}
__m128 test_mm_mask_load_ss(__m128 __A, __mmask8 __U, const float* __W)
{
// CHECK-LABEL: test_mm_mask_load_ss
- // CHECK: call {{.*}}<4 x float> @llvm.masked.load.v4f32.p0(ptr %{{.*}}, i32 1, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
+ // CHECK: call {{.*}}<4 x float> @llvm.masked.load.v4f32.p0(ptr align 1 %{{.*}}, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
return _mm_mask_load_ss(__A, __U, __W);
}
__m128 test_mm_maskz_load_ss (__mmask8 __U, const float * __W)
{
// CHECK-LABEL: test_mm_maskz_load_ss
- // CHECK: call {{.*}}<4 x float> @llvm.masked.load.v4f32.p0(ptr %{{.*}}, i32 1, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
+ // CHECK: call {{.*}}<4 x float> @llvm.masked.load.v4f32.p0(ptr align 1 %{{.*}}, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
return _mm_maskz_load_ss (__U, __W);
}
__m128d test_mm_mask_load_sd (__m128d __A, __mmask8 __U, const double * __W)
{
// CHECK-LABEL: test_mm_mask_load_sd
- // CHECK: call {{.*}}<2 x double> @llvm.masked.load.v2f64.p0(ptr %{{.*}}, i32 1, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
+ // CHECK: call {{.*}}<2 x double> @llvm.masked.load.v2f64.p0(ptr align 1 %{{.*}}, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
return _mm_mask_load_sd (__A, __U, __W);
}
__m128d test_mm_maskz_load_sd (__mmask8 __U, const double * __W)
{
// CHECK-LABEL: test_mm_maskz_load_sd
- // CHECK: call {{.*}}<2 x double> @llvm.masked.load.v2f64.p0(ptr %{{.*}}, i32 1, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
+ // CHECK: call {{.*}}<2 x double> @llvm.masked.load.v2f64.p0(ptr align 1 %{{.*}}, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
return _mm_maskz_load_sd (__U, __W);
}
diff --git a/clang/test/CodeGen/X86/avx512fp16-builtins.c b/clang/test/CodeGen/X86/avx512fp16-builtins.c
index 2befff0..f0a0a3b 100644
--- a/clang/test/CodeGen/X86/avx512fp16-builtins.c
+++ b/clang/test/CodeGen/X86/avx512fp16-builtins.c
@@ -1505,13 +1505,13 @@ __m128h test_mm_load_sh(void const *A) {
__m128h test_mm_mask_load_sh(__m128h __A, __mmask8 __U, const void *__W) {
// CHECK-LABEL: test_mm_mask_load_sh
- // CHECK: @llvm.masked.load.v8f16.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x half> %{{.*}})
+ // CHECK: @llvm.masked.load.v8f16.p0(ptr align 1 %{{.*}}, <8 x i1> %{{.*}}, <8 x half> %{{.*}})
return _mm_mask_load_sh(__A, __U, __W);
}
__m128h test_mm_maskz_load_sh(__mmask8 __U, const void *__W) {
// CHECK-LABEL: test_mm_maskz_load_sh
- // CHECK: @llvm.masked.load.v8f16.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x half> %{{.*}})
+ // CHECK: @llvm.masked.load.v8f16.p0(ptr align 1 %{{.*}}, <8 x i1> %{{.*}}, <8 x half> %{{.*}})
return _mm_maskz_load_sh(__U, __W);
}
@@ -1560,7 +1560,7 @@ void test_mm_store_sh(void *A, __m128h B) {
void test_mm_mask_store_sh(void *__P, __mmask8 __U, __m128h __A) {
// CHECK-LABEL: test_mm_mask_store_sh
- // CHECK: call void @llvm.masked.store.v8f16.p0(<8 x half> %{{.*}}, ptr %{{.*}}, i32 1, <8 x i1> %{{.*}})
+ // CHECK: call void @llvm.masked.store.v8f16.p0(<8 x half> %{{.*}}, ptr align 1 %{{.*}}, <8 x i1> %{{.*}})
_mm_mask_store_sh(__P, __U, __A);
}
diff --git a/clang/test/CodeGen/X86/avx512vl-builtins.c b/clang/test/CodeGen/X86/avx512vl-builtins.c
index 9b6bfea9..9819dbd 100644
--- a/clang/test/CodeGen/X86/avx512vl-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vl-builtins.c
@@ -7002,7 +7002,7 @@ void test_mm_store_epi32(void *__P, __m128i __A) {
void test_mm_mask_store_epi32(void *__P, __mmask8 __U, __m128i __A) {
// CHECK-LABEL: test_mm_mask_store_epi32
- // CHECK: @llvm.masked.store.v4i32.p0(<4 x i32> %{{.*}}, ptr %{{.}}, i32 16, <4 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v4i32.p0(<4 x i32> %{{.*}}, ptr align 16 %{{.}}, <4 x i1> %{{.*}})
return _mm_mask_store_epi32(__P, __U, __A);
}
@@ -7014,7 +7014,7 @@ void test_mm256_store_epi32(void *__P, __m256i __A) {
void test_mm256_mask_store_epi32(void *__P, __mmask8 __U, __m256i __A) {
// CHECK-LABEL: test_mm256_mask_store_epi32
- // CHECK: @llvm.masked.store.v8i32.p0(<8 x i32> %{{.*}}, ptr %{{.}}, i32 32, <8 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v8i32.p0(<8 x i32> %{{.*}}, ptr align 32 %{{.}}, <8 x i1> %{{.*}})
return _mm256_mask_store_epi32(__P, __U, __A);
}
@@ -7074,13 +7074,13 @@ __m128i test_mm_load_epi32(void const *__P) {
__m128i test_mm_mask_load_epi32(__m128i __W, __mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm_mask_load_epi32
- // CHECK: @llvm.masked.load.v4i32.p0(ptr %{{.*}}, i32 16, <4 x i1> %{{.*}}, <4 x i32> %{{.*}})
+ // CHECK: @llvm.masked.load.v4i32.p0(ptr align 16 %{{.*}}, <4 x i1> %{{.*}}, <4 x i32> %{{.*}})
return _mm_mask_load_epi32(__W, __U, __P);
}
__m128i test_mm_maskz_load_epi32(__mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm_maskz_load_epi32
- // CHECK: @llvm.masked.load.v4i32.p0(ptr %{{.*}}, i32 16, <4 x i1> %{{.*}}, <4 x i32> %{{.*}})
+ // CHECK: @llvm.masked.load.v4i32.p0(ptr align 16 %{{.*}}, <4 x i1> %{{.*}}, <4 x i32> %{{.*}})
return _mm_maskz_load_epi32(__U, __P);
}
@@ -7092,13 +7092,13 @@ __m256i test_mm256_load_epi32(void const *__P) {
__m256i test_mm256_mask_load_epi32(__m256i __W, __mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm256_mask_load_epi32
- // CHECK: @llvm.masked.load.v8i32.p0(ptr %{{.*}}, i32 32, <8 x i1> %{{.*}}, <8 x i32> %{{.*}})
+ // CHECK: @llvm.masked.load.v8i32.p0(ptr align 32 %{{.*}}, <8 x i1> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_mask_load_epi32(__W, __U, __P);
}
__m256i test_mm256_maskz_load_epi32(__mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm256_maskz_load_epi32
- // CHECK: @llvm.masked.load.v8i32.p0(ptr %{{.*}}, i32 32, <8 x i1> %{{.*}}, <8 x i32> %{{.*}})
+ // CHECK: @llvm.masked.load.v8i32.p0(ptr align 32 %{{.*}}, <8 x i1> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_maskz_load_epi32(__U, __P);
}
@@ -7110,13 +7110,13 @@ __m128i test_mm_load_epi64(void const *__P) {
__m128i test_mm_mask_load_epi64(__m128i __W, __mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm_mask_load_epi64
- // CHECK: @llvm.masked.load.v2i64.p0(ptr %{{.*}}, i32 16, <2 x i1> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK: @llvm.masked.load.v2i64.p0(ptr align 16 %{{.*}}, <2 x i1> %{{.*}}, <2 x i64> %{{.*}})
return _mm_mask_load_epi64(__W, __U, __P);
}
__m128i test_mm_maskz_load_epi64(__mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm_maskz_load_epi64
- // CHECK: @llvm.masked.load.v2i64.p0(ptr %{{.*}}, i32 16, <2 x i1> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK: @llvm.masked.load.v2i64.p0(ptr align 16 %{{.*}}, <2 x i1> %{{.*}}, <2 x i64> %{{.*}})
return _mm_maskz_load_epi64(__U, __P);
}
@@ -7128,13 +7128,13 @@ __m256i test_mm256_load_epi64(void const *__P) {
__m256i test_mm256_mask_load_epi64(__m256i __W, __mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm256_mask_load_epi64
- // CHECK: @llvm.masked.load.v4i64.p0(ptr %{{.*}}, i32 32, <4 x i1> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK: @llvm.masked.load.v4i64.p0(ptr align 32 %{{.*}}, <4 x i1> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_mask_load_epi64(__W, __U, __P);
}
__m256i test_mm256_maskz_load_epi64(__mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm256_maskz_load_epi64
- // CHECK: @llvm.masked.load.v4i64.p0(ptr %{{.*}}, i32 32, <4 x i1> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK: @llvm.masked.load.v4i64.p0(ptr align 32 %{{.*}}, <4 x i1> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_maskz_load_epi64(__U, __P);
}
@@ -7146,7 +7146,7 @@ void test_mm_store_epi64(void *__P, __m128i __A) {
void test_mm_mask_store_epi64(void *__P, __mmask8 __U, __m128i __A) {
// CHECK-LABEL: test_mm_mask_store_epi64
- // CHECK: @llvm.masked.store.v2i64.p0(<2 x i64> %{{.*}}, ptr %{{.*}}, i32 16, <2 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v2i64.p0(<2 x i64> %{{.*}}, ptr align 16 %{{.*}}, <2 x i1> %{{.*}})
return _mm_mask_store_epi64(__P, __U, __A);
}
@@ -7158,7 +7158,7 @@ void test_mm256_store_epi64(void *__P, __m256i __A) {
void test_mm256_mask_store_epi64(void *__P, __mmask8 __U, __m256i __A) {
// CHECK-LABEL: test_mm256_mask_store_epi64
- // CHECK: @llvm.masked.store.v4i64.p0(<4 x i64> %{{.*}}, ptr %{{.*}}, i32 32, <4 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v4i64.p0(<4 x i64> %{{.*}}, ptr align 32 %{{.*}}, <4 x i1> %{{.*}})
return _mm256_mask_store_epi64(__P, __U, __A);
}
@@ -7370,49 +7370,49 @@ __m256 test_mm256_maskz_fixupimm_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256
__m128d test_mm_mask_load_pd(__m128d __W, __mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm_mask_load_pd
- // CHECK: @llvm.masked.load.v2f64.p0(ptr %{{.*}}, i32 16, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
+ // CHECK: @llvm.masked.load.v2f64.p0(ptr align 16 %{{.*}}, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
return _mm_mask_load_pd(__W, __U, __P);
}
__m128d test_mm_maskz_load_pd(__mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm_maskz_load_pd
- // CHECK: @llvm.masked.load.v2f64.p0(ptr %{{.*}}, i32 16, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
+ // CHECK: @llvm.masked.load.v2f64.p0(ptr align 16 %{{.*}}, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
return _mm_maskz_load_pd(__U, __P);
}
__m256d test_mm256_mask_load_pd(__m256d __W, __mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm256_mask_load_pd
- // CHECK: @llvm.masked.load.v4f64.p0(ptr %{{.*}}, i32 32, <4 x i1> %{{.*}}, <4 x double> %{{.*}})
+ // CHECK: @llvm.masked.load.v4f64.p0(ptr align 32 %{{.*}}, <4 x i1> %{{.*}}, <4 x double> %{{.*}})
return _mm256_mask_load_pd(__W, __U, __P);
}
__m256d test_mm256_maskz_load_pd(__mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm256_maskz_load_pd
- // CHECK: @llvm.masked.load.v4f64.p0(ptr %{{.*}}, i32 32, <4 x i1> %{{.*}}, <4 x double> %{{.*}})
+ // CHECK: @llvm.masked.load.v4f64.p0(ptr align 32 %{{.*}}, <4 x i1> %{{.*}}, <4 x double> %{{.*}})
return _mm256_maskz_load_pd(__U, __P);
}
__m128 test_mm_mask_load_ps(__m128 __W, __mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm_mask_load_ps
- // CHECK: @llvm.masked.load.v4f32.p0(ptr %{{.*}}, i32 16, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
+ // CHECK: @llvm.masked.load.v4f32.p0(ptr align 16 %{{.*}}, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
return _mm_mask_load_ps(__W, __U, __P);
}
__m128 test_mm_maskz_load_ps(__mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm_maskz_load_ps
- // CHECK: @llvm.masked.load.v4f32.p0(ptr %{{.*}}, i32 16, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
+ // CHECK: @llvm.masked.load.v4f32.p0(ptr align 16 %{{.*}}, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
return _mm_maskz_load_ps(__U, __P);
}
__m256 test_mm256_mask_load_ps(__m256 __W, __mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm256_mask_load_ps
- // CHECK: @llvm.masked.load.v8f32.p0(ptr %{{.*}}, i32 32, <8 x i1> %{{.*}}, <8 x float> %{{.*}})
+ // CHECK: @llvm.masked.load.v8f32.p0(ptr align 32 %{{.*}}, <8 x i1> %{{.*}}, <8 x float> %{{.*}})
return _mm256_mask_load_ps(__W, __U, __P);
}
__m256 test_mm256_maskz_load_ps(__mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm256_maskz_load_ps
- // CHECK: @llvm.masked.load.v8f32.p0(ptr %{{.*}}, i32 32, <8 x i1> %{{.*}}, <8 x float> %{{.*}})
+ // CHECK: @llvm.masked.load.v8f32.p0(ptr align 32 %{{.*}}, <8 x i1> %{{.*}}, <8 x float> %{{.*}})
return _mm256_maskz_load_ps(__U, __P);
}
@@ -7424,13 +7424,13 @@ __m128i test_mm_loadu_epi64(void const *__P) {
__m128i test_mm_mask_loadu_epi64(__m128i __W, __mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm_mask_loadu_epi64
- // CHECK: @llvm.masked.load.v2i64.p0(ptr %{{.*}}, i32 1, <2 x i1> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK: @llvm.masked.load.v2i64.p0(ptr align 1 %{{.*}}, <2 x i1> %{{.*}}, <2 x i64> %{{.*}})
return _mm_mask_loadu_epi64(__W, __U, __P);
}
__m128i test_mm_maskz_loadu_epi64(__mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm_maskz_loadu_epi64
- // CHECK: @llvm.masked.load.v2i64.p0(ptr %{{.*}}, i32 1, <2 x i1> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK: @llvm.masked.load.v2i64.p0(ptr align 1 %{{.*}}, <2 x i1> %{{.*}}, <2 x i64> %{{.*}})
return _mm_maskz_loadu_epi64(__U, __P);
}
@@ -7442,13 +7442,13 @@ __m256i test_mm256_loadu_epi64(void const *__P) {
__m256i test_mm256_mask_loadu_epi64(__m256i __W, __mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm256_mask_loadu_epi64
- // CHECK: @llvm.masked.load.v4i64.p0(ptr %{{.*}}, i32 1, <4 x i1> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK: @llvm.masked.load.v4i64.p0(ptr align 1 %{{.*}}, <4 x i1> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_mask_loadu_epi64(__W, __U, __P);
}
__m256i test_mm256_maskz_loadu_epi64(__mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm256_maskz_loadu_epi64
- // CHECK: @llvm.masked.load.v4i64.p0(ptr %{{.*}}, i32 1, <4 x i1> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK: @llvm.masked.load.v4i64.p0(ptr align 1 %{{.*}}, <4 x i1> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_maskz_loadu_epi64(__U, __P);
}
@@ -7460,13 +7460,13 @@ __m128i test_mm_loadu_epi32(void const *__P) {
__m128i test_mm_mask_loadu_epi32(__m128i __W, __mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm_mask_loadu_epi32
- // CHECK: @llvm.masked.load.v4i32.p0(ptr %{{.*}}, i32 1, <4 x i1> %{{.*}}, <4 x i32> %{{.*}})
+ // CHECK: @llvm.masked.load.v4i32.p0(ptr align 1 %{{.*}}, <4 x i1> %{{.*}}, <4 x i32> %{{.*}})
return _mm_mask_loadu_epi32(__W, __U, __P);
}
__m128i test_mm_maskz_loadu_epi32(__mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm_maskz_loadu_epi32
- // CHECK: @llvm.masked.load.v4i32.p0(ptr %{{.*}}, i32 1, <4 x i1> %{{.*}}, <4 x i32> %{{.*}})
+ // CHECK: @llvm.masked.load.v4i32.p0(ptr align 1 %{{.*}}, <4 x i1> %{{.*}}, <4 x i32> %{{.*}})
return _mm_maskz_loadu_epi32(__U, __P);
}
@@ -7478,85 +7478,85 @@ __m256i test_mm256_loadu_epi32(void const *__P) {
__m256i test_mm256_mask_loadu_epi32(__m256i __W, __mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm256_mask_loadu_epi32
- // CHECK: @llvm.masked.load.v8i32.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x i32> %{{.*}})
+ // CHECK: @llvm.masked.load.v8i32.p0(ptr align 1 %{{.*}}, <8 x i1> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_mask_loadu_epi32(__W, __U, __P);
}
__m256i test_mm256_maskz_loadu_epi32(__mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm256_maskz_loadu_epi32
- // CHECK: @llvm.masked.load.v8i32.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x i32> %{{.*}})
+ // CHECK: @llvm.masked.load.v8i32.p0(ptr align 1 %{{.*}}, <8 x i1> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_maskz_loadu_epi32(__U, __P);
}
__m128d test_mm_mask_loadu_pd(__m128d __W, __mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm_mask_loadu_pd
- // CHECK: @llvm.masked.load.v2f64.p0(ptr %{{.*}}, i32 1, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
+ // CHECK: @llvm.masked.load.v2f64.p0(ptr align 1 %{{.*}}, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
return _mm_mask_loadu_pd(__W, __U, __P);
}
__m128d test_mm_maskz_loadu_pd(__mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm_maskz_loadu_pd
- // CHECK: @llvm.masked.load.v2f64.p0(ptr %{{.*}}, i32 1, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
+ // CHECK: @llvm.masked.load.v2f64.p0(ptr align 1 %{{.*}}, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
return _mm_maskz_loadu_pd(__U, __P);
}
__m256d test_mm256_mask_loadu_pd(__m256d __W, __mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm256_mask_loadu_pd
- // CHECK: @llvm.masked.load.v4f64.p0(ptr %{{.*}}, i32 1, <4 x i1> %{{.*}}, <4 x double> %{{.*}})
+ // CHECK: @llvm.masked.load.v4f64.p0(ptr align 1 %{{.*}}, <4 x i1> %{{.*}}, <4 x double> %{{.*}})
return _mm256_mask_loadu_pd(__W, __U, __P);
}
__m256d test_mm256_maskz_loadu_pd(__mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm256_maskz_loadu_pd
- // CHECK: @llvm.masked.load.v4f64.p0(ptr %{{.*}}, i32 1, <4 x i1> %{{.*}}, <4 x double> %{{.*}})
+ // CHECK: @llvm.masked.load.v4f64.p0(ptr align 1 %{{.*}}, <4 x i1> %{{.*}}, <4 x double> %{{.*}})
return _mm256_maskz_loadu_pd(__U, __P);
}
__m128 test_mm_mask_loadu_ps(__m128 __W, __mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm_mask_loadu_ps
- // CHECK: @llvm.masked.load.v4f32.p0(ptr %{{.*}}, i32 1, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
+ // CHECK: @llvm.masked.load.v4f32.p0(ptr align 1 %{{.*}}, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
return _mm_mask_loadu_ps(__W, __U, __P);
}
__m128 test_mm_maskz_loadu_ps(__mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm_maskz_loadu_ps
- // CHECK: @llvm.masked.load.v4f32.p0(ptr %{{.*}}, i32 1, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
+ // CHECK: @llvm.masked.load.v4f32.p0(ptr align 1 %{{.*}}, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
return _mm_maskz_loadu_ps(__U, __P);
}
__m256 test_mm256_mask_loadu_ps(__m256 __W, __mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm256_mask_loadu_ps
- // CHECK: @llvm.masked.load.v8f32.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x float> %{{.*}})
+ // CHECK: @llvm.masked.load.v8f32.p0(ptr align 1 %{{.*}}, <8 x i1> %{{.*}}, <8 x float> %{{.*}})
return _mm256_mask_loadu_ps(__W, __U, __P);
}
__m256 test_mm256_maskz_loadu_ps(__mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm256_maskz_loadu_ps
- // CHECK: @llvm.masked.load.v8f32.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x float> %{{.*}})
+ // CHECK: @llvm.masked.load.v8f32.p0(ptr align 1 %{{.*}}, <8 x i1> %{{.*}}, <8 x float> %{{.*}})
return _mm256_maskz_loadu_ps(__U, __P);
}
void test_mm_mask_store_pd(void *__P, __mmask8 __U, __m128d __A) {
// CHECK-LABEL: test_mm_mask_store_pd
- // CHECK: @llvm.masked.store.v2f64.p0(<2 x double> %{{.*}}, ptr %{{.*}}, i32 16, <2 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v2f64.p0(<2 x double> %{{.*}}, ptr align 16 %{{.*}}, <2 x i1> %{{.*}})
return _mm_mask_store_pd(__P, __U, __A);
}
void test_mm256_mask_store_pd(void *__P, __mmask8 __U, __m256d __A) {
// CHECK-LABEL: test_mm256_mask_store_pd
- // CHECK: @llvm.masked.store.v4f64.p0(<4 x double> %{{.*}}, ptr %{{.*}}, i32 32, <4 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v4f64.p0(<4 x double> %{{.*}}, ptr align 32 %{{.*}}, <4 x i1> %{{.*}})
return _mm256_mask_store_pd(__P, __U, __A);
}
void test_mm_mask_store_ps(void *__P, __mmask8 __U, __m128 __A) {
// CHECK-LABEL: test_mm_mask_store_ps
- // CHECK: @llvm.masked.store.v4f32.p0(<4 x float> %{{.*}}, ptr %{{.*}}, i32 16, <4 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v4f32.p0(<4 x float> %{{.*}}, ptr align 16 %{{.*}}, <4 x i1> %{{.*}})
return _mm_mask_store_ps(__P, __U, __A);
}
void test_mm256_mask_store_ps(void *__P, __mmask8 __U, __m256 __A) {
// CHECK-LABEL: test_mm256_mask_store_ps
- // CHECK: @llvm.masked.store.v8f32.p0(<8 x float> %{{.*}}, ptr %{{.*}}, i32 32, <8 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v8f32.p0(<8 x float> %{{.*}}, ptr align 32 %{{.*}}, <8 x i1> %{{.*}})
return _mm256_mask_store_ps(__P, __U, __A);
}
@@ -7568,7 +7568,7 @@ void test_mm_storeu_epi64(void *__p, __m128i __a) {
void test_mm_mask_storeu_epi64(void *__P, __mmask8 __U, __m128i __A) {
// CHECK-LABEL: test_mm_mask_storeu_epi64
- // CHECK: @llvm.masked.store.v2i64.p0(<2 x i64> %{{.*}}, ptr %{{.*}}, i32 1, <2 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v2i64.p0(<2 x i64> %{{.*}}, ptr align 1 %{{.*}}, <2 x i1> %{{.*}})
return _mm_mask_storeu_epi64(__P, __U, __A);
}
@@ -7580,7 +7580,7 @@ void test_mm256_storeu_epi64(void *__P, __m256i __A) {
void test_mm256_mask_storeu_epi64(void *__P, __mmask8 __U, __m256i __A) {
// CHECK-LABEL: test_mm256_mask_storeu_epi64
- // CHECK: @llvm.masked.store.v4i64.p0(<4 x i64> %{{.*}}, ptr %{{.*}}, i32 1, <4 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v4i64.p0(<4 x i64> %{{.*}}, ptr align 1 %{{.*}}, <4 x i1> %{{.*}})
return _mm256_mask_storeu_epi64(__P, __U, __A);
}
@@ -7592,7 +7592,7 @@ void test_mm_storeu_epi32(void *__P, __m128i __A) {
void test_mm_mask_storeu_epi32(void *__P, __mmask8 __U, __m128i __A) {
// CHECK-LABEL: test_mm_mask_storeu_epi32
- // CHECK: @llvm.masked.store.v4i32.p0(<4 x i32> %{{.*}}, ptr %{{.*}}, i32 1, <4 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v4i32.p0(<4 x i32> %{{.*}}, ptr align 1 %{{.*}}, <4 x i1> %{{.*}})
return _mm_mask_storeu_epi32(__P, __U, __A);
}
@@ -7604,31 +7604,31 @@ void test_mm256_storeu_epi32(void *__P, __m256i __A) {
void test_mm256_mask_storeu_epi32(void *__P, __mmask8 __U, __m256i __A) {
// CHECK-LABEL: test_mm256_mask_storeu_epi32
- // CHECK: @llvm.masked.store.v8i32.p0(<8 x i32> %{{.*}}, ptr %{{.*}}, i32 1, <8 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v8i32.p0(<8 x i32> %{{.*}}, ptr align 1 %{{.*}}, <8 x i1> %{{.*}})
return _mm256_mask_storeu_epi32(__P, __U, __A);
}
void test_mm_mask_storeu_pd(void *__P, __mmask8 __U, __m128d __A) {
// CHECK-LABEL: test_mm_mask_storeu_pd
- // CHECK: @llvm.masked.store.v2f64.p0(<2 x double> %{{.*}}, ptr %{{.*}}, i32 1, <2 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v2f64.p0(<2 x double> %{{.*}}, ptr align 1 %{{.*}}, <2 x i1> %{{.*}})
return _mm_mask_storeu_pd(__P, __U, __A);
}
void test_mm256_mask_storeu_pd(void *__P, __mmask8 __U, __m256d __A) {
// CHECK-LABEL: test_mm256_mask_storeu_pd
- // CHECK: @llvm.masked.store.v4f64.p0(<4 x double> %{{.*}}, ptr %{{.*}}, i32 1, <4 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v4f64.p0(<4 x double> %{{.*}}, ptr align 1 %{{.*}}, <4 x i1> %{{.*}})
return _mm256_mask_storeu_pd(__P, __U, __A);
}
void test_mm_mask_storeu_ps(void *__P, __mmask8 __U, __m128 __A) {
// CHECK-LABEL: test_mm_mask_storeu_ps
- // CHECK: @llvm.masked.store.v4f32.p0(<4 x float> %{{.*}}, ptr %{{.*}}, i32 1, <4 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v4f32.p0(<4 x float> %{{.*}}, ptr align 1 %{{.*}}, <4 x i1> %{{.*}})
return _mm_mask_storeu_ps(__P, __U, __A);
}
void test_mm256_mask_storeu_ps(void *__P, __mmask8 __U, __m256 __A) {
// CHECK-LABEL: test_mm256_mask_storeu_ps
- // CHECK: @llvm.masked.store.v8f32.p0(<8 x float> %{{.*}}, ptr %{{.*}}, i32 1, <8 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v8f32.p0(<8 x float> %{{.*}}, ptr align 1 %{{.*}}, <8 x i1> %{{.*}})
return _mm256_mask_storeu_ps(__P, __U, __A);
}
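The CHECK churn in the hunks above is mechanical: alignment on the @llvm.masked.load / @llvm.masked.store intrinsics is now carried as an `align` attribute on the pointer operand instead of an explicit i32 immediate, so `(ptr %p, i32 1, ...)` becomes `(ptr align 1 %p, ...)`. For orientation, a minimal usage sketch (not part of the test file) of the unaligned mask intrinsics that produce this IR, assuming an AVX-512VL target:

#include <immintrin.h>

// Compile with -mavx512vl. The unaligned variants lower to masked
// load/store IR with `align 1` on the pointer; the aligned variants
// (_mm_mask_store_ps etc.) carry the full vector alignment instead.
void copy_masked(float *dst, const float *src, __mmask8 m) {
  __m256 v = _mm256_maskz_loadu_ps(m, src); // masked-off lanes read as 0
  _mm256_mask_storeu_ps(dst, m, v);         // masked-off lanes untouched
}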
diff --git a/clang/test/CodeGen/X86/avx512vlbw-builtins.c b/clang/test/CodeGen/X86/avx512vlbw-builtins.c
index d569283..116d86f 100644
--- a/clang/test/CodeGen/X86/avx512vlbw-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vlbw-builtins.c
@@ -2061,6 +2061,7 @@ __m128i test_mm_mask_mulhrs_epi16(__m128i __W, __mmask8 __U, __m128i __X, __m128
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_mulhrs_epi16(__W, __U, __X, __Y);
}
+TEST_CONSTEXPR(match_v8hi(_mm_mask_mulhrs_epi16(_mm_set1_epi16(1), 0x0F, (__m128i)(__v8hi){+100, +200, -300, -400, +500, +600, -700, +800}, (__m128i)(__v8hi){+8000, -7000, +6000, -5000, +4000, -3000, +2000, -1000}), +24, -43, -55, +61, +1, +1, +1, +1));
__m128i test_mm_maskz_mulhrs_epi16(__mmask8 __U, __m128i __X, __m128i __Y) {
// CHECK-LABEL: test_mm_maskz_mulhrs_epi16
@@ -2068,6 +2069,7 @@ __m128i test_mm_maskz_mulhrs_epi16(__mmask8 __U, __m128i __X, __m128i __Y) {
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_mulhrs_epi16(__U, __X, __Y);
}
+TEST_CONSTEXPR(match_v8hi(_mm_maskz_mulhrs_epi16(0x0F, (__m128i)(__v8hi){+100, +200, -300, -400, +500, +600, -700, +800}, (__m128i)(__v8hi){+8000, -7000, +6000, -5000, +4000, -3000, +2000, -1000}), +24, -43, -55, +61, 0, 0, 0, 0));
__m256i test_mm256_mask_mulhrs_epi16(__m256i __W, __mmask16 __U, __m256i __X, __m256i __Y) {
// CHECK-LABEL: test_mm256_mask_mulhrs_epi16
@@ -2075,6 +2077,7 @@ __m256i test_mm256_mask_mulhrs_epi16(__m256i __W, __mmask16 __U, __m256i __X, __
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_mulhrs_epi16(__W, __U, __X, __Y);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_mask_mulhrs_epi16(_mm256_set1_epi16(1), 0xF00F, (__m256i)(__v16hi){+100, +200, -300, -400, +500, +600, -700, +800, -900, -1000, +1100, +1200, -1300, -1400, +1500, +1600}, (__m256i)(__v16hi){+1600, -1500, +1400, -1300, +1200, -1100, +1000, -900, +800, -700, +600, -500, +400, -300, +200, -100}), +5, -9, -13, +16, +1, +1, +1, +1, +1, +1, +1, +1, -16, +13, +9, -5));
__m256i test_mm256_maskz_mulhrs_epi16(__mmask16 __U, __m256i __X, __m256i __Y) {
// CHECK-LABEL: test_mm256_maskz_mulhrs_epi16
@@ -2082,6 +2085,7 @@ __m256i test_mm256_maskz_mulhrs_epi16(__mmask16 __U, __m256i __X, __m256i __Y) {
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_mulhrs_epi16(__U, __X, __Y);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_maskz_mulhrs_epi16(0xF00F, (__m256i)(__v16hi){+100, +200, -300, -400, +500, +600, -700, +800, -900, -1000, +1100, +1200, -1300, -1400, +1500, +1600}, (__m256i)(__v16hi){+1600, -1500, +1400, -1300, +1200, -1100, +1000, -900, +800, -700, +600, -500, +400, -300, +200, -100}), +5, -9, -13, +16, 0, 0, 0, 0, 0, 0, 0, 0, -16, +13, +9, -5));
__m128i test_mm_mask_mulhi_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
// CHECK-LABEL: test_mm_mask_mulhi_epu16
@@ -2772,13 +2776,13 @@ __m128i test_mm_loadu_epi16(void const *__P) {
__m128i test_mm_mask_loadu_epi16(__m128i __W, __mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm_mask_loadu_epi16
- // CHECK: @llvm.masked.load.v8i16.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x i16> %{{.*}})
+ // CHECK: @llvm.masked.load.v8i16.p0(ptr align 1 %{{.*}}, <8 x i1> %{{.*}}, <8 x i16> %{{.*}})
return _mm_mask_loadu_epi16(__W, __U, __P);
}
__m128i test_mm_maskz_loadu_epi16(__mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm_maskz_loadu_epi16
- // CHECK: @llvm.masked.load.v8i16.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x i16> %{{.*}})
+ // CHECK: @llvm.masked.load.v8i16.p0(ptr align 1 %{{.*}}, <8 x i1> %{{.*}}, <8 x i16> %{{.*}})
return _mm_maskz_loadu_epi16(__U, __P);
}
@@ -2790,13 +2794,13 @@ __m256i test_mm256_loadu_epi16(void const *__P) {
__m256i test_mm256_mask_loadu_epi16(__m256i __W, __mmask16 __U, void const *__P) {
// CHECK-LABEL: test_mm256_mask_loadu_epi16
- // CHECK: @llvm.masked.load.v16i16.p0(ptr %{{.*}}, i32 1, <16 x i1> %{{.*}}, <16 x i16> %{{.*}})
+ // CHECK: @llvm.masked.load.v16i16.p0(ptr align 1 %{{.*}}, <16 x i1> %{{.*}}, <16 x i16> %{{.*}})
return _mm256_mask_loadu_epi16(__W, __U, __P);
}
__m256i test_mm256_maskz_loadu_epi16(__mmask16 __U, void const *__P) {
// CHECK-LABEL: test_mm256_maskz_loadu_epi16
- // CHECK: @llvm.masked.load.v16i16.p0(ptr %{{.*}}, i32 1, <16 x i1> %{{.*}}, <16 x i16> %{{.*}})
+ // CHECK: @llvm.masked.load.v16i16.p0(ptr align 1 %{{.*}}, <16 x i1> %{{.*}}, <16 x i16> %{{.*}})
return _mm256_maskz_loadu_epi16(__U, __P);
}
@@ -2808,13 +2812,13 @@ __m128i test_mm_loadu_epi8(void const *__P) {
__m128i test_mm_mask_loadu_epi8(__m128i __W, __mmask16 __U, void const *__P) {
// CHECK-LABEL: test_mm_mask_loadu_epi8
- // CHECK: @llvm.masked.load.v16i8.p0(ptr %{{.*}}, i32 1, <16 x i1> %{{.*}}, <16 x i8> %{{.*}})
+ // CHECK: @llvm.masked.load.v16i8.p0(ptr align 1 %{{.*}}, <16 x i1> %{{.*}}, <16 x i8> %{{.*}})
return _mm_mask_loadu_epi8(__W, __U, __P);
}
__m128i test_mm_maskz_loadu_epi8(__mmask16 __U, void const *__P) {
// CHECK-LABEL: test_mm_maskz_loadu_epi8
- // CHECK: @llvm.masked.load.v16i8.p0(ptr %{{.*}}, i32 1, <16 x i1> %{{.*}}, <16 x i8> %{{.*}})
+ // CHECK: @llvm.masked.load.v16i8.p0(ptr align 1 %{{.*}}, <16 x i1> %{{.*}}, <16 x i8> %{{.*}})
return _mm_maskz_loadu_epi8(__U, __P);
}
@@ -2826,13 +2830,13 @@ __m256i test_mm256_loadu_epi8(void const *__P) {
__m256i test_mm256_mask_loadu_epi8(__m256i __W, __mmask32 __U, void const *__P) {
// CHECK-LABEL: test_mm256_mask_loadu_epi8
- // CHECK: @llvm.masked.load.v32i8.p0(ptr %{{.*}}, i32 1, <32 x i1> %{{.*}}, <32 x i8> %{{.*}})
+ // CHECK: @llvm.masked.load.v32i8.p0(ptr align 1 %{{.*}}, <32 x i1> %{{.*}}, <32 x i8> %{{.*}})
return _mm256_mask_loadu_epi8(__W, __U, __P);
}
__m256i test_mm256_maskz_loadu_epi8(__mmask32 __U, void const *__P) {
// CHECK-LABEL: test_mm256_maskz_loadu_epi8
- // CHECK: @llvm.masked.load.v32i8.p0(ptr %{{.*}}, i32 1, <32 x i1> %{{.*}}, <32 x i8> %{{.*}})
+ // CHECK: @llvm.masked.load.v32i8.p0(ptr align 1 %{{.*}}, <32 x i1> %{{.*}}, <32 x i8> %{{.*}})
return _mm256_maskz_loadu_epi8(__U, __P);
}
@@ -2844,7 +2848,7 @@ void test_mm_storeu_epi16(void *__p, __m128i __a) {
void test_mm_mask_storeu_epi16(void *__P, __mmask8 __U, __m128i __A) {
// CHECK-LABEL: test_mm_mask_storeu_epi16
- // CHECK: @llvm.masked.store.v8i16.p0(<8 x i16> %{{.*}}, ptr %{{.*}}, i32 1, <8 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v8i16.p0(<8 x i16> %{{.*}}, ptr align 1 %{{.*}}, <8 x i1> %{{.*}})
return _mm_mask_storeu_epi16(__P, __U, __A);
}
@@ -2856,7 +2860,7 @@ void test_mm256_storeu_epi16(void *__P, __m256i __A) {
void test_mm256_mask_storeu_epi16(void *__P, __mmask16 __U, __m256i __A) {
// CHECK-LABEL: test_mm256_mask_storeu_epi16
- // CHECK: @llvm.masked.store.v16i16.p0(<16 x i16> %{{.*}}, ptr %{{.*}}, i32 1, <16 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v16i16.p0(<16 x i16> %{{.*}}, ptr align 1 %{{.*}}, <16 x i1> %{{.*}})
return _mm256_mask_storeu_epi16(__P, __U, __A);
}
@@ -2868,7 +2872,7 @@ void test_mm_storeu_epi8(void *__p, __m128i __a) {
void test_mm_mask_storeu_epi8(void *__P, __mmask16 __U, __m128i __A) {
// CHECK-LABEL: test_mm_mask_storeu_epi8
- // CHECK: @llvm.masked.store.v16i8.p0(<16 x i8> %{{.*}}, ptr %{{.*}}, i32 1, <16 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v16i8.p0(<16 x i8> %{{.*}}, ptr align 1 %{{.*}}, <16 x i1> %{{.*}})
return _mm_mask_storeu_epi8(__P, __U, __A);
}
@@ -2880,7 +2884,7 @@ void test_mm256_storeu_epi8(void *__P, __m256i __A) {
void test_mm256_mask_storeu_epi8(void *__P, __mmask32 __U, __m256i __A) {
// CHECK-LABEL: test_mm256_mask_storeu_epi8
- // CHECK: @llvm.masked.store.v32i8.p0(<32 x i8> %{{.*}}, ptr %{{.*}}, i32 1, <32 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v32i8.p0(<32 x i8> %{{.*}}, ptr align 1 %{{.*}}, <32 x i1> %{{.*}})
return _mm256_mask_storeu_epi8(__P, __U, __A);
}
__mmask16 test_mm_test_epi8_mask(__m128i __A, __m128i __B) {
diff --git a/clang/test/CodeGen/X86/mmx-builtins.c b/clang/test/CodeGen/X86/mmx-builtins.c
index d9041d4..c1ac57b 100644
--- a/clang/test/CodeGen/X86/mmx-builtins.c
+++ b/clang/test/CodeGen/X86/mmx-builtins.c
@@ -438,6 +438,7 @@ __m64 test_mm_mulhrs_pi16(__m64 a, __m64 b) {
// CHECK: call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(
return _mm_mulhrs_pi16(a, b);
}
+TEST_CONSTEXPR(match_v4hi(_mm_mulhrs_pi16((__m64)(__v4hi){+100, +200, -300, -400}, (__m64)(__v4hi){+30000, -20000, +10000, -5000}), +92, -122, -92, +61));
__m64 test_mm_mullo_pi16(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_mullo_pi16
diff --git a/clang/test/CodeGen/X86/ssse3-builtins.c b/clang/test/CodeGen/X86/ssse3-builtins.c
index 32abd9d..f70afc0 100644
--- a/clang/test/CodeGen/X86/ssse3-builtins.c
+++ b/clang/test/CodeGen/X86/ssse3-builtins.c
@@ -110,6 +110,7 @@ __m128i test_mm_mulhrs_epi16(__m128i a, __m128i b) {
// CHECK: call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
return _mm_mulhrs_epi16(a, b);
}
+TEST_CONSTEXPR(match_v8hi(_mm_mulhrs_epi16((__m128i)(__v8hi){+100, +200, -300, -400, +500, +600, -700, +800}, (__m128i)(__v8hi){+8000, -7000, +6000, -5000, +4000, -3000, +2000, -1000}), +24, -43, -55, +61, +61, -55, -43, -24));
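The TEST_CONSTEXPR lines added across the mmx, ssse3, and avx512vlbw tests all pin down the same constant-evaluation rule for pmulhrsw. A scalar reference for one lane, consistent with the expected vectors above (a sketch for illustration, not the in-tree implementation):

// pmulhrsw, one lane: widen to 32 bits, multiply, shift right by 14,
// round by adding 1, then drop the rounding bit. E.g. 100 * 8000 =
// 800000; 800000 >> 14 = 48; (48 + 1) >> 1 = 24, matching the first
// expected element above.
static inline short mulhrs16(short a, short b) {
  int p = (int)a * (int)b;          // exact 32-bit product
  return (short)(((p >> 14) + 1) >> 1);
}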
__m128i test_mm_shuffle_epi8(__m128i a, __m128i b) {
// CHECK-LABEL: test_mm_shuffle_epi8
diff --git a/clang/test/CodeGen/arm-mve-intrinsics/cplusplus.cpp b/clang/test/CodeGen/arm-mve-intrinsics/cplusplus.cpp
index 2971961..4e3d0ce 100644
--- a/clang/test/CodeGen/arm-mve-intrinsics/cplusplus.cpp
+++ b/clang/test/CodeGen/arm-mve-intrinsics/cplusplus.cpp
@@ -114,7 +114,7 @@ uint16x8_t test_vld1q_u16(const uint16_t *base)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[VALUE:%.*]], ptr align 4 [[BASE:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vst1q_p_s32(int32_t *base, int32x4_t value, mve_pred16_t p)
diff --git a/clang/test/CodeGen/arm-mve-intrinsics/dup.c b/clang/test/CodeGen/arm-mve-intrinsics/dup.c
index c2c7a9c..e7113fd 100644
--- a/clang/test/CodeGen/arm-mve-intrinsics/dup.c
+++ b/clang/test/CodeGen/arm-mve-intrinsics/dup.c
@@ -244,8 +244,7 @@ uint32x4_t test_vdupq_m_n_u32(uint32x4_t inactive, uint32_t a, mve_pred16_t p)
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x half> poison, half [[A:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x half> [[DOTSPLATINSERT]], <8 x half> poison, <8 x i32> zeroinitializer
-// CHECK-NEXT: [[TMP2:%.*]] = select <8 x i1> [[TMP1]], <8 x half> [[DOTSPLAT]], <8 x half> undef
-// CHECK-NEXT: ret <8 x half> [[TMP2]]
+// CHECK-NEXT: ret <8 x half> [[DOTSPLAT]]
//
float16x8_t test_vdupq_x_n_f16(float16_t a, mve_pred16_t p)
{
@@ -258,8 +257,7 @@ float16x8_t test_vdupq_x_n_f16(float16_t a, mve_pred16_t p)
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[A:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
-// CHECK-NEXT: [[TMP2:%.*]] = select <4 x i1> [[TMP1]], <4 x float> [[DOTSPLAT]], <4 x float> undef
-// CHECK-NEXT: ret <4 x float> [[TMP2]]
+// CHECK-NEXT: ret <4 x float> [[DOTSPLAT]]
//
float32x4_t test_vdupq_x_n_f32(float32_t a, mve_pred16_t p)
{
@@ -272,8 +270,7 @@ float32x4_t test_vdupq_x_n_f32(float32_t a, mve_pred16_t p)
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[A:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
-// CHECK-NEXT: [[TMP2:%.*]] = select <16 x i1> [[TMP1]], <16 x i8> [[DOTSPLAT]], <16 x i8> undef
-// CHECK-NEXT: ret <16 x i8> [[TMP2]]
+// CHECK-NEXT: ret <16 x i8> [[DOTSPLAT]]
//
int8x16_t test_vdupq_x_n_s8(int8_t a, mve_pred16_t p)
{
@@ -286,8 +283,7 @@ int8x16_t test_vdupq_x_n_s8(int8_t a, mve_pred16_t p)
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[A:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
-// CHECK-NEXT: [[TMP2:%.*]] = select <8 x i1> [[TMP1]], <8 x i16> [[DOTSPLAT]], <8 x i16> undef
-// CHECK-NEXT: ret <8 x i16> [[TMP2]]
+// CHECK-NEXT: ret <8 x i16> [[DOTSPLAT]]
//
int16x8_t test_vdupq_x_n_s16(int16_t a, mve_pred16_t p)
{
@@ -300,8 +296,7 @@ int16x8_t test_vdupq_x_n_s16(int16_t a, mve_pred16_t p)
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[A:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
-// CHECK-NEXT: [[TMP2:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[DOTSPLAT]], <4 x i32> undef
-// CHECK-NEXT: ret <4 x i32> [[TMP2]]
+// CHECK-NEXT: ret <4 x i32> [[DOTSPLAT]]
//
int32x4_t test_vdupq_x_n_s32(int32_t a, mve_pred16_t p)
{
@@ -314,8 +309,7 @@ int32x4_t test_vdupq_x_n_s32(int32_t a, mve_pred16_t p)
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[A:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
-// CHECK-NEXT: [[TMP2:%.*]] = select <16 x i1> [[TMP1]], <16 x i8> [[DOTSPLAT]], <16 x i8> undef
-// CHECK-NEXT: ret <16 x i8> [[TMP2]]
+// CHECK-NEXT: ret <16 x i8> [[DOTSPLAT]]
//
uint8x16_t test_vdupq_x_n_u8(uint8_t a, mve_pred16_t p)
{
@@ -328,8 +322,7 @@ uint8x16_t test_vdupq_x_n_u8(uint8_t a, mve_pred16_t p)
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[A:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
-// CHECK-NEXT: [[TMP2:%.*]] = select <8 x i1> [[TMP1]], <8 x i16> [[DOTSPLAT]], <8 x i16> undef
-// CHECK-NEXT: ret <8 x i16> [[TMP2]]
+// CHECK-NEXT: ret <8 x i16> [[DOTSPLAT]]
//
uint16x8_t test_vdupq_x_n_u16(uint16_t a, mve_pred16_t p)
{
@@ -342,8 +335,7 @@ uint16x8_t test_vdupq_x_n_u16(uint16_t a, mve_pred16_t p)
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[A:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
-// CHECK-NEXT: [[TMP2:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[DOTSPLAT]], <4 x i32> undef
-// CHECK-NEXT: ret <4 x i32> [[TMP2]]
+// CHECK-NEXT: ret <4 x i32> [[DOTSPLAT]]
//
uint32x4_t test_vdupq_x_n_u32(uint32_t a, mve_pred16_t p)
{
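The dup.c updates are a fold rather than a semantic change: the `_x` ("don't care") variants leave predicated-off lanes unspecified, so selecting between the splat and undef is just the splat, and the select is no longer emitted. A sketch of the kind of source these checks correspond to, assuming an MVE target:

#include <arm_mve.h>

// vdupq_x_n: splat a scalar across all lanes; lanes where the
// predicate is false are undefined, which is why no select on the
// predicate survives in the IR above.
int32x4_t splat_x(int32_t a, mve_pred16_t p) {
  return vdupq_x_n_s32(a, p);
}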
diff --git a/clang/test/CodeGen/arm-mve-intrinsics/load-store.c b/clang/test/CodeGen/arm-mve-intrinsics/load-store.c
index 50f7011..ede2e95 100644
--- a/clang/test/CodeGen/arm-mve-intrinsics/load-store.c
+++ b/clang/test/CodeGen/arm-mve-intrinsics/load-store.c
@@ -122,7 +122,7 @@ uint32x4_t test_vld1q_u32(const uint32_t *base)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.masked.load.v8f16.p0(ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]], <8 x half> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.masked.load.v8f16.p0(ptr align 2 [[BASE:%.*]], <8 x i1> [[TMP1]], <8 x half> zeroinitializer)
// CHECK-NEXT: ret <8 x half> [[TMP2]]
//
float16x8_t test_vld1q_z_f16(const float16_t *base, mve_pred16_t p)
@@ -138,7 +138,7 @@ float16x8_t test_vld1q_z_f16(const float16_t *base, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]], <4 x float> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr align 4 [[BASE:%.*]], <4 x i1> [[TMP1]], <4 x float> zeroinitializer)
// CHECK-NEXT: ret <4 x float> [[TMP2]]
//
float32x4_t test_vld1q_z_f32(const float32_t *base, mve_pred16_t p)
@@ -154,7 +154,7 @@ float32x4_t test_vld1q_z_f32(const float32_t *base, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP1]], <16 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr align 1 [[BASE:%.*]], <16 x i1> [[TMP1]], <16 x i8> zeroinitializer)
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
int8x16_t test_vld1q_z_s8(const int8_t *base, mve_pred16_t p)
@@ -170,7 +170,7 @@ int8x16_t test_vld1q_z_s8(const int8_t *base, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]], <8 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr align 2 [[BASE:%.*]], <8 x i1> [[TMP1]], <8 x i16> zeroinitializer)
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
int16x8_t test_vld1q_z_s16(const int16_t *base, mve_pred16_t p)
@@ -186,7 +186,7 @@ int16x8_t test_vld1q_z_s16(const int16_t *base, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]], <4 x i32> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[BASE:%.*]], <4 x i1> [[TMP1]], <4 x i32> zeroinitializer)
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
int32x4_t test_vld1q_z_s32(const int32_t *base, mve_pred16_t p)
@@ -202,7 +202,7 @@ int32x4_t test_vld1q_z_s32(const int32_t *base, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP1]], <16 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr align 1 [[BASE:%.*]], <16 x i1> [[TMP1]], <16 x i8> zeroinitializer)
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
uint8x16_t test_vld1q_z_u8(const uint8_t *base, mve_pred16_t p)
@@ -218,7 +218,7 @@ uint8x16_t test_vld1q_z_u8(const uint8_t *base, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]], <8 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr align 2 [[BASE:%.*]], <8 x i1> [[TMP1]], <8 x i16> zeroinitializer)
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vld1q_z_u16(const uint16_t *base, mve_pred16_t p)
@@ -234,7 +234,7 @@ uint16x8_t test_vld1q_z_u16(const uint16_t *base, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]], <4 x i32> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[BASE:%.*]], <4 x i1> [[TMP1]], <4 x i32> zeroinitializer)
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
uint32x4_t test_vld1q_z_u32(const uint32_t *base, mve_pred16_t p)
@@ -314,7 +314,7 @@ uint32x4_t test_vldrbq_u32(const uint8_t *base)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP1]], <16 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr align 1 [[BASE:%.*]], <16 x i1> [[TMP1]], <16 x i8> zeroinitializer)
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
int8x16_t test_vldrbq_z_s8(const int8_t *base, mve_pred16_t p)
@@ -326,7 +326,7 @@ int8x16_t test_vldrbq_z_s8(const int8_t *base, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr [[BASE:%.*]], i32 1, <8 x i1> [[TMP1]], <8 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr align 1 [[BASE:%.*]], <8 x i1> [[TMP1]], <8 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP3:%.*]] = sext <8 x i8> [[TMP2]] to <8 x i16>
// CHECK-NEXT: ret <8 x i16> [[TMP3]]
//
@@ -339,7 +339,7 @@ int16x8_t test_vldrbq_z_s16(const int8_t *base, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr [[BASE:%.*]], i32 1, <4 x i1> [[TMP1]], <4 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr align 1 [[BASE:%.*]], <4 x i1> [[TMP1]], <4 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP3:%.*]] = sext <4 x i8> [[TMP2]] to <4 x i32>
// CHECK-NEXT: ret <4 x i32> [[TMP3]]
//
@@ -352,7 +352,7 @@ int32x4_t test_vldrbq_z_s32(const int8_t *base, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP1]], <16 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr align 1 [[BASE:%.*]], <16 x i1> [[TMP1]], <16 x i8> zeroinitializer)
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
uint8x16_t test_vldrbq_z_u8(const uint8_t *base, mve_pred16_t p)
@@ -364,7 +364,7 @@ uint8x16_t test_vldrbq_z_u8(const uint8_t *base, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr [[BASE:%.*]], i32 1, <8 x i1> [[TMP1]], <8 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr align 1 [[BASE:%.*]], <8 x i1> [[TMP1]], <8 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP3:%.*]] = zext <8 x i8> [[TMP2]] to <8 x i16>
// CHECK-NEXT: ret <8 x i16> [[TMP3]]
//
@@ -377,7 +377,7 @@ uint16x8_t test_vldrbq_z_u16(const uint8_t *base, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr [[BASE:%.*]], i32 1, <4 x i1> [[TMP1]], <4 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr align 1 [[BASE:%.*]], <4 x i1> [[TMP1]], <4 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP3:%.*]] = zext <4 x i8> [[TMP2]] to <4 x i32>
// CHECK-NEXT: ret <4 x i32> [[TMP3]]
//
@@ -442,7 +442,7 @@ uint32x4_t test_vldrhq_u32(const uint16_t *base)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.masked.load.v8f16.p0(ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]], <8 x half> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.masked.load.v8f16.p0(ptr align 2 [[BASE:%.*]], <8 x i1> [[TMP1]], <8 x half> zeroinitializer)
// CHECK-NEXT: ret <8 x half> [[TMP2]]
//
float16x8_t test_vldrhq_z_f16(const float16_t *base, mve_pred16_t p)
@@ -454,7 +454,7 @@ float16x8_t test_vldrhq_z_f16(const float16_t *base, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]], <8 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr align 2 [[BASE:%.*]], <8 x i1> [[TMP1]], <8 x i16> zeroinitializer)
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
int16x8_t test_vldrhq_z_s16(const int16_t *base, mve_pred16_t p)
@@ -466,7 +466,7 @@ int16x8_t test_vldrhq_z_s16(const int16_t *base, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr [[BASE:%.*]], i32 2, <4 x i1> [[TMP1]], <4 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr align 2 [[BASE:%.*]], <4 x i1> [[TMP1]], <4 x i16> zeroinitializer)
// CHECK-NEXT: [[TMP3:%.*]] = sext <4 x i16> [[TMP2]] to <4 x i32>
// CHECK-NEXT: ret <4 x i32> [[TMP3]]
//
@@ -479,7 +479,7 @@ int32x4_t test_vldrhq_z_s32(const int16_t *base, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]], <8 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr align 2 [[BASE:%.*]], <8 x i1> [[TMP1]], <8 x i16> zeroinitializer)
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vldrhq_z_u16(const uint16_t *base, mve_pred16_t p)
@@ -491,7 +491,7 @@ uint16x8_t test_vldrhq_z_u16(const uint16_t *base, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr [[BASE:%.*]], i32 2, <4 x i1> [[TMP1]], <4 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr align 2 [[BASE:%.*]], <4 x i1> [[TMP1]], <4 x i16> zeroinitializer)
// CHECK-NEXT: [[TMP3:%.*]] = zext <4 x i16> [[TMP2]] to <4 x i32>
// CHECK-NEXT: ret <4 x i32> [[TMP3]]
//
@@ -534,7 +534,7 @@ uint32x4_t test_vldrwq_u32(const uint32_t *base)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]], <4 x float> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr align 4 [[BASE:%.*]], <4 x i1> [[TMP1]], <4 x float> zeroinitializer)
// CHECK-NEXT: ret <4 x float> [[TMP2]]
//
float32x4_t test_vldrwq_z_f32(const float32_t *base, mve_pred16_t p)
@@ -546,7 +546,7 @@ float32x4_t test_vldrwq_z_f32(const float32_t *base, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]], <4 x i32> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[BASE:%.*]], <4 x i1> [[TMP1]], <4 x i32> zeroinitializer)
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
int32x4_t test_vldrwq_z_s32(const int32_t *base, mve_pred16_t p)
@@ -558,7 +558,7 @@ int32x4_t test_vldrwq_z_s32(const int32_t *base, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]], <4 x i32> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[BASE:%.*]], <4 x i1> [[TMP1]], <4 x i32> zeroinitializer)
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
uint32x4_t test_vldrwq_z_u32(const uint32_t *base, mve_pred16_t p)
@@ -682,7 +682,7 @@ void test_vst1q_u32(uint32_t *base, uint32x4_t value)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT: call void @llvm.masked.store.v8f16.p0(<8 x half> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v8f16.p0(<8 x half> [[VALUE:%.*]], ptr align 2 [[BASE:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vst1q_p_f16(float16_t *base, float16x8_t value, mve_pred16_t p)
@@ -698,7 +698,7 @@ void test_vst1q_p_f16(float16_t *base, float16x8_t value, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT: call void @llvm.masked.store.v4f32.p0(<4 x float> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v4f32.p0(<4 x float> [[VALUE:%.*]], ptr align 4 [[BASE:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vst1q_p_f32(float32_t *base, float32x4_t value, mve_pred16_t p)
@@ -714,7 +714,7 @@ void test_vst1q_p_f32(float32_t *base, float32x4_t value, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
-// CHECK-NEXT: call void @llvm.masked.store.v16i8.p0(<16 x i8> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v16i8.p0(<16 x i8> [[VALUE:%.*]], ptr align 1 [[BASE:%.*]], <16 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vst1q_p_s8(int8_t *base, int8x16_t value, mve_pred16_t p)
@@ -730,7 +730,7 @@ void test_vst1q_p_s8(int8_t *base, int8x16_t value, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT: call void @llvm.masked.store.v8i16.p0(<8 x i16> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v8i16.p0(<8 x i16> [[VALUE:%.*]], ptr align 2 [[BASE:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vst1q_p_s16(int16_t *base, int16x8_t value, mve_pred16_t p)
@@ -746,7 +746,7 @@ void test_vst1q_p_s16(int16_t *base, int16x8_t value, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[VALUE:%.*]], ptr align 4 [[BASE:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vst1q_p_s32(int32_t *base, int32x4_t value, mve_pred16_t p)
@@ -762,7 +762,7 @@ void test_vst1q_p_s32(int32_t *base, int32x4_t value, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
-// CHECK-NEXT: call void @llvm.masked.store.v16i8.p0(<16 x i8> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v16i8.p0(<16 x i8> [[VALUE:%.*]], ptr align 1 [[BASE:%.*]], <16 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vst1q_p_u8(uint8_t *base, uint8x16_t value, mve_pred16_t p)
@@ -778,7 +778,7 @@ void test_vst1q_p_u8(uint8_t *base, uint8x16_t value, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT: call void @llvm.masked.store.v8i16.p0(<8 x i16> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v8i16.p0(<8 x i16> [[VALUE:%.*]], ptr align 2 [[BASE:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vst1q_p_u16(uint16_t *base, uint16x8_t value, mve_pred16_t p)
@@ -794,7 +794,7 @@ void test_vst1q_p_u16(uint16_t *base, uint16x8_t value, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[VALUE:%.*]], ptr align 4 [[BASE:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vst1q_p_u32(uint32_t *base, uint32x4_t value, mve_pred16_t p)
@@ -898,7 +898,7 @@ void test_vstrbq_u32(uint8_t *base, uint32x4_t value)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
-// CHECK-NEXT: call void @llvm.masked.store.v16i8.p0(<16 x i8> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v16i8.p0(<16 x i8> [[VALUE:%.*]], ptr align 1 [[BASE:%.*]], <16 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vstrbq_p_s8(int8_t *base, int8x16_t value, mve_pred16_t p)
@@ -915,7 +915,7 @@ void test_vstrbq_p_s8(int8_t *base, int8x16_t value, mve_pred16_t p)
// CHECK-NEXT: [[TMP0:%.*]] = trunc <8 x i16> [[VALUE:%.*]] to <8 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
-// CHECK-NEXT: call void @llvm.masked.store.v8i8.p0(<8 x i8> [[TMP0]], ptr [[BASE:%.*]], i32 1, <8 x i1> [[TMP2]])
+// CHECK-NEXT: call void @llvm.masked.store.v8i8.p0(<8 x i8> [[TMP0]], ptr align 1 [[BASE:%.*]], <8 x i1> [[TMP2]])
// CHECK-NEXT: ret void
//
void test_vstrbq_p_s16(int8_t *base, int16x8_t value, mve_pred16_t p)
@@ -932,7 +932,7 @@ void test_vstrbq_p_s16(int8_t *base, int16x8_t value, mve_pred16_t p)
// CHECK-NEXT: [[TMP0:%.*]] = trunc <4 x i32> [[VALUE:%.*]] to <4 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
-// CHECK-NEXT: call void @llvm.masked.store.v4i8.p0(<4 x i8> [[TMP0]], ptr [[BASE:%.*]], i32 1, <4 x i1> [[TMP2]])
+// CHECK-NEXT: call void @llvm.masked.store.v4i8.p0(<4 x i8> [[TMP0]], ptr align 1 [[BASE:%.*]], <4 x i1> [[TMP2]])
// CHECK-NEXT: ret void
//
void test_vstrbq_p_s32(int8_t *base, int32x4_t value, mve_pred16_t p)
@@ -948,7 +948,7 @@ void test_vstrbq_p_s32(int8_t *base, int32x4_t value, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
-// CHECK-NEXT: call void @llvm.masked.store.v16i8.p0(<16 x i8> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v16i8.p0(<16 x i8> [[VALUE:%.*]], ptr align 1 [[BASE:%.*]], <16 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vstrbq_p_u8(uint8_t *base, uint8x16_t value, mve_pred16_t p)
@@ -965,7 +965,7 @@ void test_vstrbq_p_u8(uint8_t *base, uint8x16_t value, mve_pred16_t p)
// CHECK-NEXT: [[TMP0:%.*]] = trunc <8 x i16> [[VALUE:%.*]] to <8 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
-// CHECK-NEXT: call void @llvm.masked.store.v8i8.p0(<8 x i8> [[TMP0]], ptr [[BASE:%.*]], i32 1, <8 x i1> [[TMP2]])
+// CHECK-NEXT: call void @llvm.masked.store.v8i8.p0(<8 x i8> [[TMP0]], ptr align 1 [[BASE:%.*]], <8 x i1> [[TMP2]])
// CHECK-NEXT: ret void
//
void test_vstrbq_p_u16(uint8_t *base, uint16x8_t value, mve_pred16_t p)
@@ -982,7 +982,7 @@ void test_vstrbq_p_u16(uint8_t *base, uint16x8_t value, mve_pred16_t p)
// CHECK-NEXT: [[TMP0:%.*]] = trunc <4 x i32> [[VALUE:%.*]] to <4 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
-// CHECK-NEXT: call void @llvm.masked.store.v4i8.p0(<4 x i8> [[TMP0]], ptr [[BASE:%.*]], i32 1, <4 x i1> [[TMP2]])
+// CHECK-NEXT: call void @llvm.masked.store.v4i8.p0(<4 x i8> [[TMP0]], ptr align 1 [[BASE:%.*]], <4 x i1> [[TMP2]])
// CHECK-NEXT: ret void
//
void test_vstrbq_p_u32(uint8_t *base, uint32x4_t value, mve_pred16_t p)
@@ -1070,7 +1070,7 @@ void test_vstrhq_u32(uint16_t *base, uint32x4_t value)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT: call void @llvm.masked.store.v8f16.p0(<8 x half> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v8f16.p0(<8 x half> [[VALUE:%.*]], ptr align 2 [[BASE:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vstrhq_p_f16(float16_t *base, float16x8_t value, mve_pred16_t p)
@@ -1086,7 +1086,7 @@ void test_vstrhq_p_f16(float16_t *base, float16x8_t value, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT: call void @llvm.masked.store.v8i16.p0(<8 x i16> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v8i16.p0(<8 x i16> [[VALUE:%.*]], ptr align 2 [[BASE:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vstrhq_p_s16(int16_t *base, int16x8_t value, mve_pred16_t p)
@@ -1103,7 +1103,7 @@ void test_vstrhq_p_s16(int16_t *base, int16x8_t value, mve_pred16_t p)
// CHECK-NEXT: [[TMP0:%.*]] = trunc <4 x i32> [[VALUE:%.*]] to <4 x i16>
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
-// CHECK-NEXT: call void @llvm.masked.store.v4i16.p0(<4 x i16> [[TMP0]], ptr [[BASE:%.*]], i32 2, <4 x i1> [[TMP2]])
+// CHECK-NEXT: call void @llvm.masked.store.v4i16.p0(<4 x i16> [[TMP0]], ptr align 2 [[BASE:%.*]], <4 x i1> [[TMP2]])
// CHECK-NEXT: ret void
//
void test_vstrhq_p_s32(int16_t *base, int32x4_t value, mve_pred16_t p)
@@ -1119,7 +1119,7 @@ void test_vstrhq_p_s32(int16_t *base, int32x4_t value, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT: call void @llvm.masked.store.v8i16.p0(<8 x i16> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v8i16.p0(<8 x i16> [[VALUE:%.*]], ptr align 2 [[BASE:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vstrhq_p_u16(uint16_t *base, uint16x8_t value, mve_pred16_t p)
@@ -1136,7 +1136,7 @@ void test_vstrhq_p_u16(uint16_t *base, uint16x8_t value, mve_pred16_t p)
// CHECK-NEXT: [[TMP0:%.*]] = trunc <4 x i32> [[VALUE:%.*]] to <4 x i16>
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
-// CHECK-NEXT: call void @llvm.masked.store.v4i16.p0(<4 x i16> [[TMP0]], ptr [[BASE:%.*]], i32 2, <4 x i1> [[TMP2]])
+// CHECK-NEXT: call void @llvm.masked.store.v4i16.p0(<4 x i16> [[TMP0]], ptr align 2 [[BASE:%.*]], <4 x i1> [[TMP2]])
// CHECK-NEXT: ret void
//
void test_vstrhq_p_u32(uint16_t *base, uint32x4_t value, mve_pred16_t p)
@@ -1194,7 +1194,7 @@ void test_vstrwq_u32(uint32_t *base, uint32x4_t value)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT: call void @llvm.masked.store.v4f32.p0(<4 x float> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v4f32.p0(<4 x float> [[VALUE:%.*]], ptr align 4 [[BASE:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vstrwq_p_f32(float32_t *base, float32x4_t value, mve_pred16_t p)
@@ -1210,7 +1210,7 @@ void test_vstrwq_p_f32(float32_t *base, float32x4_t value, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[VALUE:%.*]], ptr align 4 [[BASE:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vstrwq_p_s32(int32_t *base, int32x4_t value, mve_pred16_t p)
@@ -1226,7 +1226,7 @@ void test_vstrwq_p_s32(int32_t *base, int32x4_t value, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[VALUE:%.*]], ptr align 4 [[BASE:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vstrwq_p_u32(uint32_t *base, uint32x4_t value, mve_pred16_t p)
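As in the x86 tests, every MVE masked load/store check above only moves the element alignment from an i32 operand onto the pointer. For reference, the predication semantics these tests exercise, in a usage sketch (not a test addition): the _z load variants zero the inactive lanes, and the _p store variants leave inactive lanes of memory untouched.

#include <arm_mve.h>

// Predicated copy: inactive lanes load as zero and are not stored.
// Lowers to @llvm.masked.load/store with `align 4` on the pointer.
void copy_p(int32_t *dst, const int32_t *src, mve_pred16_t p) {
  int32x4_t v = vld1q_z_s32(src, p);
  vst1q_p_s32(dst, v, p);
}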
diff --git a/clang/test/CodeGen/builtin-masked.c b/clang/test/CodeGen/builtin-masked.c
index e2b5e09..28b94b7 100644
--- a/clang/test/CodeGen/builtin-masked.c
+++ b/clang/test/CodeGen/builtin-masked.c
@@ -19,7 +19,7 @@ typedef _Bool v8b __attribute__((ext_vector_type(8)));
// CHECK-NEXT: [[LOAD_BITS2:%.*]] = load i8, ptr [[M_ADDR]], align 1
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[LOAD_BITS2]] to <8 x i1>
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[P_ADDR]], align 8
-// CHECK-NEXT: [[MASKED_LOAD:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr [[TMP2]], i32 4, <8 x i1> [[TMP1]], <8 x i32> poison)
+// CHECK-NEXT: [[MASKED_LOAD:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr align 4 [[TMP2]], <8 x i1> [[TMP1]], <8 x i32> poison)
// CHECK-NEXT: ret <8 x i32> [[MASKED_LOAD]]
//
v8i test_load(v8b m, int *p) {
@@ -45,7 +45,7 @@ v8i test_load(v8b m, int *p) {
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[LOAD_BITS2]] to <8 x i1>
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[P_ADDR]], align 8
// CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr [[T_ADDR]], align 32
-// CHECK-NEXT: [[MASKED_LOAD:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr [[TMP3]], i32 4, <8 x i1> [[TMP2]], <8 x i32> [[TMP4]])
+// CHECK-NEXT: [[MASKED_LOAD:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr align 4 [[TMP3]], <8 x i1> [[TMP2]], <8 x i32> [[TMP4]])
// CHECK-NEXT: ret <8 x i32> [[MASKED_LOAD]]
//
v8i test_load_passthru(v8b m, int *p, v8i t) {
@@ -97,7 +97,7 @@ v8i test_load_expand(v8b m, int *p, v8i t) {
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[LOAD_BITS2]] to <8 x i1>
// CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr [[V_ADDR]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[P_ADDR]], align 8
-// CHECK-NEXT: call void @llvm.masked.store.v8i32.p0(<8 x i32> [[TMP3]], ptr [[TMP4]], i32 4, <8 x i1> [[TMP2]])
+// CHECK-NEXT: call void @llvm.masked.store.v8i32.p0(<8 x i32> [[TMP3]], ptr align 4 [[TMP4]], <8 x i1> [[TMP2]])
// CHECK-NEXT: ret void
//
void test_store(v8b m, v8i v, int *p) {
@@ -150,7 +150,7 @@ void test_compress_store(v8b m, v8i v, int *p) {
// CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr [[IDX_ADDR]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP5:%.*]] = getelementptr i32, ptr [[TMP4]], <8 x i32> [[TMP3]]
-// CHECK-NEXT: [[MASKED_GATHER:%.*]] = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> [[TMP5]], i32 4, <8 x i1> [[TMP2]], <8 x i32> poison)
+// CHECK-NEXT: [[MASKED_GATHER:%.*]] = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> align 4 [[TMP5]], <8 x i1> [[TMP2]], <8 x i32> poison)
// CHECK-NEXT: ret <8 x i32> [[MASKED_GATHER]]
//
v8i test_gather(v8b mask, v8i idx, int *ptr) {
@@ -181,7 +181,7 @@ v8i test_gather(v8b mask, v8i idx, int *ptr) {
// CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr [[IDX_ADDR]], align 32
// CHECK-NEXT: [[TMP6:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[TMP6]], <8 x i32> [[TMP4]]
-// CHECK-NEXT: call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> [[TMP5]], <8 x ptr> [[TMP7]], i32 4, <8 x i1> [[TMP3]])
+// CHECK-NEXT: call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> [[TMP5]], <8 x ptr> align 4 [[TMP7]], <8 x i1> [[TMP3]])
// CHECK-NEXT: ret void
//
void test_scatter(v8b mask, v8i val, v8i idx, int *ptr) {
@@ -203,7 +203,7 @@ void test_scatter(v8b mask, v8i val, v8i idx, int *ptr) {
// CHECK-NEXT: [[LOAD_BITS2:%.*]] = load i8, ptr [[MASK_ADDR]], align 1
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[LOAD_BITS2]] to <8 x i1>
// CHECK-NEXT: [[TMP2:%.*]] = load ptr addrspace(42), ptr [[PTR_ADDR]], align 8
-// CHECK-NEXT: [[MASKED_LOAD:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p42(ptr addrspace(42) [[TMP2]], i32 4, <8 x i1> [[TMP1]], <8 x i32> poison)
+// CHECK-NEXT: [[MASKED_LOAD:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p42(ptr addrspace(42) align 4 [[TMP2]], <8 x i1> [[TMP1]], <8 x i32> poison)
// CHECK-NEXT: ret <8 x i32> [[MASKED_LOAD]]
//
v8i test_load_as(v8b mask, int __attribute__((address_space(42))) * ptr) {
@@ -229,7 +229,7 @@ v8i test_load_as(v8b mask, int __attribute__((address_space(42))) * ptr) {
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[LOAD_BITS2]] to <8 x i1>
// CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr [[V_ADDR]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load ptr addrspace(42), ptr [[P_ADDR]], align 8
-// CHECK-NEXT: call void @llvm.masked.store.v8i32.p42(<8 x i32> [[TMP3]], ptr addrspace(42) [[TMP4]], i32 4, <8 x i1> [[TMP2]])
+// CHECK-NEXT: call void @llvm.masked.store.v8i32.p42(<8 x i32> [[TMP3]], ptr addrspace(42) align 4 [[TMP4]], <8 x i1> [[TMP2]])
// CHECK-NEXT: ret void
//
void test_store_as(v8b m, v8i v, int __attribute__((address_space(42))) *p) {
@@ -256,7 +256,7 @@ void test_store_as(v8b m, v8i v, int __attribute__((address_space(42))) *p) {
// CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr [[IDX_ADDR]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load ptr addrspace(42), ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP5:%.*]] = getelementptr i32, ptr addrspace(42) [[TMP4]], <8 x i32> [[TMP3]]
-// CHECK-NEXT: [[MASKED_GATHER:%.*]] = call <8 x i32> @llvm.masked.gather.v8i32.v8p42(<8 x ptr addrspace(42)> [[TMP5]], i32 4, <8 x i1> [[TMP2]], <8 x i32> poison)
+// CHECK-NEXT: [[MASKED_GATHER:%.*]] = call <8 x i32> @llvm.masked.gather.v8i32.v8p42(<8 x ptr addrspace(42)> align 4 [[TMP5]], <8 x i1> [[TMP2]], <8 x i32> poison)
// CHECK-NEXT: ret <8 x i32> [[MASKED_GATHER]]
//
v8i test_gather_as(v8b mask, v8i idx, int __attribute__((address_space(42))) *ptr) {
@@ -287,7 +287,7 @@ v8i test_gather_as(v8b mask, v8i idx, int __attribute__((address_space(42))) *pt
// CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr [[IDX_ADDR]], align 32
// CHECK-NEXT: [[TMP6:%.*]] = load ptr addrspace(42), ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr addrspace(42) [[TMP6]], <8 x i32> [[TMP4]]
-// CHECK-NEXT: call void @llvm.masked.scatter.v8i32.v8p42(<8 x i32> [[TMP5]], <8 x ptr addrspace(42)> [[TMP7]], i32 4, <8 x i1> [[TMP3]])
+// CHECK-NEXT: call void @llvm.masked.scatter.v8i32.v8p42(<8 x i32> [[TMP5]], <8 x ptr addrspace(42)> align 4 [[TMP7]], <8 x i1> [[TMP3]])
// CHECK-NEXT: ret void
//
void test_scatter_as(v8b mask, v8i val, v8i idx, int __attribute__((address_space(42))) *ptr) {
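The gather/scatter checks follow the same pattern, with the alignment now attached to the vector-of-pointers operand. A scalar model of the gather being checked (a sketch; the IR passthru is poison, modeled as zero here):

// One lane-by-lane step of @llvm.masked.gather.v8i32: an indexed load
// wherever the mask bit is set. The real IR leaves unselected lanes
// poison rather than zero.
static void gather_ref(int out[8], const _Bool m[8],
                       const int idx[8], const int *base) {
  for (int i = 0; i < 8; ++i)
    out[i] = m[i] ? base[idx[i]] : 0;
}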
diff --git a/clang/test/CodeGen/c11atomics-ios.c b/clang/test/CodeGen/c11atomics-ios.c
index 8310270..34843ef 100644
--- a/clang/test/CodeGen/c11atomics-ios.c
+++ b/clang/test/CodeGen/c11atomics-ios.c
@@ -235,6 +235,7 @@ _Bool test_promoted_cmpxchg(_Atomic(PS) *addr, PS *desired, PS *new) {
// CHECK: [[ATOMIC_DESIRED:%.*]] = alloca { %struct.PS, [2 x i8] }, align 8
// CHECK: [[ATOMIC_NEW:%.*]] = alloca { %struct.PS, [2 x i8] }, align 8
// CHECK: [[RES_ADDR:%.*]] = alloca i8, align 1
+ // CHECK: [[OLD_TMP:%.*]] = alloca i64, align 8
// CHECK: store ptr %addr, ptr [[ADDR_ARG]], align 4
// CHECK: store ptr %desired, ptr [[DESIRED_ARG]], align 4
// CHECK: store ptr %new, ptr [[NEW_ARG]], align 4
@@ -251,7 +252,8 @@ _Bool test_promoted_cmpxchg(_Atomic(PS) *addr, PS *desired, PS *new) {
// CHECK: [[RES_BOOL:%.*]] = extractvalue { i64, i1 } [[RES]], 1
// CHECK: br i1 [[RES_BOOL]], label {{%.*}}, label {{%.*}}
- // CHECK: store i64 [[RES_VAL64]], ptr [[ATOMIC_DESIRED]], align 8
+ // CHECK: store i64 [[RES_VAL64]], ptr [[OLD_TMP]], align 8
+ // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 2 [[DESIRED_ARG:%.*]], ptr align 8 [[OLD_TMP]], i64 6, i1 false)
// CHECK: br label {{%.*}}
// CHECK: [[RES_BOOL8:%.*]] = zext i1 [[RES_BOOL]] to i8
diff --git a/clang/test/CodeGenCXX/builtin-atomic-compare_exchange.cpp b/clang/test/CodeGenCXX/builtin-atomic-compare_exchange.cpp
new file mode 100644
index 0000000..4f1fe98
--- /dev/null
+++ b/clang/test/CodeGenCXX/builtin-atomic-compare_exchange.cpp
@@ -0,0 +1,130 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// RUN: %clang_cc1 -std=c++20 -triple=x86_64-linux-gnu -emit-llvm -o - %s | FileCheck %s
+
+
+template <unsigned Size>
+struct S {
+ char data[Size];
+};
+
+// CHECK-LABEL: define dso_local noundef zeroext i1 @_Z21test_compare_exchangePU7_Atomic1SILj3EEPS0_S0_(
+// CHECK-SAME: ptr noundef [[A:%.*]], ptr noundef [[EXPECTED:%.*]], i24 [[DESIRED_COERCE:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[DESIRED:%.*]] = alloca [[STRUCT_S:%.*]], align 1
+// CHECK-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[EXPECTED_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[DOTATOMICTMP:%.*]] = alloca [[STRUCT_S]], align 1
+// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca { [[STRUCT_S]], [1 x i8] }, align 4
+// CHECK-NEXT: [[ATOMIC_TEMP1:%.*]] = alloca { [[STRUCT_S]], [1 x i8] }, align 4
+// CHECK-NEXT: [[CMPXCHG_BOOL:%.*]] = alloca i8, align 1
+// CHECK-NEXT: [[OLD_TMP:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_S]], ptr [[DESIRED]], i32 0, i32 0
+// CHECK-NEXT: store i24 [[DESIRED_COERCE]], ptr [[COERCE_DIVE]], align 1
+// CHECK-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
+// CHECK-NEXT: store ptr [[EXPECTED]], ptr [[EXPECTED_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[EXPECTED_ADDR]], align 8
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DOTATOMICTMP]], ptr align 1 [[DESIRED]], i64 3, i1 false)
+// CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[ATOMIC_TEMP]], i8 0, i64 4, i1 false)
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ATOMIC_TEMP]], ptr align 1 [[TMP1]], i64 3, i1 false)
+// CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[ATOMIC_TEMP1]], i8 0, i64 4, i1 false)
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ATOMIC_TEMP1]], ptr align 1 [[DOTATOMICTMP]], i64 3, i1 false)
+// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ATOMIC_TEMP1]], align 4
+// CHECK-NEXT: [[TMP4:%.*]] = cmpxchg ptr [[TMP0]], i32 [[TMP2]], i32 [[TMP3]] monotonic monotonic, align 4
+// CHECK-NEXT: [[TMP5:%.*]] = extractvalue { i32, i1 } [[TMP4]], 0
+// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+// CHECK-NEXT: br i1 [[TMP6]], label %[[CMPXCHG_CONTINUE:.*]], label %[[CMPXCHG_STORE_EXPECTED:.*]]
+// CHECK: [[CMPXCHG_STORE_EXPECTED]]:
+// CHECK-NEXT: store i32 [[TMP5]], ptr [[OLD_TMP]], align 4
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[TMP1]], ptr align 4 [[OLD_TMP]], i64 3, i1 false)
+// CHECK-NEXT: br label %[[CMPXCHG_CONTINUE]]
+// CHECK: [[CMPXCHG_CONTINUE]]:
+// CHECK-NEXT: [[STOREDV:%.*]] = zext i1 [[TMP6]] to i8
+// CHECK-NEXT: store i8 [[STOREDV]], ptr [[CMPXCHG_BOOL]], align 1
+// CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[CMPXCHG_BOOL]], align 1
+// CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP7]] to i1
+// CHECK-NEXT: ret i1 [[LOADEDV]]
+//
+bool test_compare_exchange(_Atomic(S<3>)* a, S<3>* expected, S<3> desired) {
+ return __c11_atomic_compare_exchange_strong(a, expected, desired, 0, 0);
+}
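The point of the S<3> case is the failure path: the 3-byte payload is promoted to a 4-byte atomic container, so the old value returned by the cmpxchg is spilled to [[OLD_TMP]] and only 3 bytes are memcpy'd back, keeping the padding byte out of *expected. Contrast the S<4> case below, where the value fills the container and the old result is stored straight through. In C terms, a sketch of what the failure-path codegen does (illustrative, not library code):

#include <string.h>

struct S3 { char data[3]; };

// Failure path for a 3-byte payload in a 4-byte atomic slot: round-trip
// the old value through a padded temporary and copy only the value
// bytes, never the padding.
static void store_expected(struct S3 *expected, unsigned old_promoted) {
  unsigned old_tmp = old_promoted;               // [[OLD_TMP]]
  memcpy(expected, &old_tmp, sizeof *expected);  // 3 bytes, not 4
}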
+
+
+// CHECK-LABEL: define dso_local noundef zeroext i1 @_Z21test_compare_exchangePU7_Atomic1SILj4EEPS0_S0_(
+// CHECK-SAME: ptr noundef [[A:%.*]], ptr noundef [[EXPECTED:%.*]], i32 [[DESIRED_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[DESIRED:%.*]] = alloca [[STRUCT_S_0:%.*]], align 1
+// CHECK-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[EXPECTED_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[DOTATOMICTMP:%.*]] = alloca [[STRUCT_S_0]], align 1
+// CHECK-NEXT: [[CMPXCHG_BOOL:%.*]] = alloca i8, align 1
+// CHECK-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_S_0]], ptr [[DESIRED]], i32 0, i32 0
+// CHECK-NEXT: store i32 [[DESIRED_COERCE]], ptr [[COERCE_DIVE]], align 1
+// CHECK-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
+// CHECK-NEXT: store ptr [[EXPECTED]], ptr [[EXPECTED_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[EXPECTED_ADDR]], align 8
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DOTATOMICTMP]], ptr align 1 [[DESIRED]], i64 4, i1 false)
+// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 1
+// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 1
+// CHECK-NEXT: [[TMP4:%.*]] = cmpxchg ptr [[TMP0]], i32 [[TMP2]], i32 [[TMP3]] monotonic monotonic, align 4
+// CHECK-NEXT: [[TMP5:%.*]] = extractvalue { i32, i1 } [[TMP4]], 0
+// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+// CHECK-NEXT: br i1 [[TMP6]], label %[[CMPXCHG_CONTINUE:.*]], label %[[CMPXCHG_STORE_EXPECTED:.*]]
+// CHECK: [[CMPXCHG_STORE_EXPECTED]]:
+// CHECK-NEXT: store i32 [[TMP5]], ptr [[TMP1]], align 1
+// CHECK-NEXT: br label %[[CMPXCHG_CONTINUE]]
+// CHECK: [[CMPXCHG_CONTINUE]]:
+// CHECK-NEXT: [[STOREDV:%.*]] = zext i1 [[TMP6]] to i8
+// CHECK-NEXT: store i8 [[STOREDV]], ptr [[CMPXCHG_BOOL]], align 1
+// CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[CMPXCHG_BOOL]], align 1
+// CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP7]] to i1
+// CHECK-NEXT: ret i1 [[LOADEDV]]
+//
+bool test_compare_exchange(_Atomic(S<4>)* a, S<4>* expected, S<4> desired) {
+ return __c11_atomic_compare_exchange_strong(a, expected, desired, 0, 0);
+}
+
+// CHECK-LABEL: define dso_local noundef zeroext i1 @_Z21test_compare_exchangePU7_Atomic1SILj6EEPS0_S0_(
+// CHECK-SAME: ptr noundef [[A:%.*]], ptr noundef [[EXPECTED:%.*]], i48 [[DESIRED_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[DESIRED:%.*]] = alloca [[STRUCT_S_1:%.*]], align 1
+// CHECK-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[EXPECTED_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[DOTATOMICTMP:%.*]] = alloca [[STRUCT_S_1]], align 1
+// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca { [[STRUCT_S_1]], [2 x i8] }, align 8
+// CHECK-NEXT: [[ATOMIC_TEMP1:%.*]] = alloca { [[STRUCT_S_1]], [2 x i8] }, align 8
+// CHECK-NEXT: [[CMPXCHG_BOOL:%.*]] = alloca i8, align 1
+// CHECK-NEXT: [[OLD_TMP:%.*]] = alloca i64, align 8
+// CHECK-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_S_1]], ptr [[DESIRED]], i32 0, i32 0
+// CHECK-NEXT: store i48 [[DESIRED_COERCE]], ptr [[COERCE_DIVE]], align 1
+// CHECK-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
+// CHECK-NEXT: store ptr [[EXPECTED]], ptr [[EXPECTED_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[EXPECTED_ADDR]], align 8
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DOTATOMICTMP]], ptr align 1 [[DESIRED]], i64 6, i1 false)
+// CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[ATOMIC_TEMP]], i8 0, i64 8, i1 false)
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[ATOMIC_TEMP]], ptr align 1 [[TMP1]], i64 6, i1 false)
+// CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[ATOMIC_TEMP1]], i8 0, i64 8, i1 false)
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[ATOMIC_TEMP1]], ptr align 1 [[DOTATOMICTMP]], i64 6, i1 false)
+// CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[ATOMIC_TEMP]], align 8
+// CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr [[ATOMIC_TEMP1]], align 8
+// CHECK-NEXT: [[TMP4:%.*]] = cmpxchg ptr [[TMP0]], i64 [[TMP2]], i64 [[TMP3]] monotonic monotonic, align 8
+// CHECK-NEXT: [[TMP5:%.*]] = extractvalue { i64, i1 } [[TMP4]], 0
+// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { i64, i1 } [[TMP4]], 1
+// CHECK-NEXT: br i1 [[TMP6]], label %[[CMPXCHG_CONTINUE:.*]], label %[[CMPXCHG_STORE_EXPECTED:.*]]
+// CHECK: [[CMPXCHG_STORE_EXPECTED]]:
+// CHECK-NEXT: store i64 [[TMP5]], ptr [[OLD_TMP]], align 8
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[TMP1]], ptr align 8 [[OLD_TMP]], i64 6, i1 false)
+// CHECK-NEXT: br label %[[CMPXCHG_CONTINUE]]
+// CHECK: [[CMPXCHG_CONTINUE]]:
+// CHECK-NEXT: [[STOREDV:%.*]] = zext i1 [[TMP6]] to i8
+// CHECK-NEXT: store i8 [[STOREDV]], ptr [[CMPXCHG_BOOL]], align 1
+// CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[CMPXCHG_BOOL]], align 1
+// CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP7]] to i1
+// CHECK-NEXT: ret i1 [[LOADEDV]]
+//
+bool test_compare_exchange(_Atomic(S<6>)* a, S<6>* expected, S<6> desired) {
+ return __c11_atomic_compare_exchange_strong(a, expected, desired, 0, 0);
+}
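
These three tests together pin down the lowering strategy: a struct whose size is already a power of two (S<4>) maps straight onto an i32 cmpxchg, while the 3- and 6-byte structs are first copied into zero-initialized temporaries padded to i32 and i64 respectively, and on failure the old value is copied back through OLD_TMP truncated to the struct's real size. The definition of S<N> is elided from this hunk; a minimal sketch consistent with the mangled names and the i24/i32/i48 coerce types (an assumption, not the test's literal source) is:

// Hypothetical reconstruction of the elided template under test.
template <unsigned N> struct S {
  char buf[N]; // sizeof(S<N>) == N, alignof(S<N>) == 1, matching the align-1 allocas
};

// sizeof == 3 -> memset + memcpy into a { S, [1 x i8] } temp, cmpxchg on i32
// sizeof == 4 -> loads feed the i32 cmpxchg directly, no padding temporaries
// sizeof == 6 -> memset + memcpy into a { S, [2 x i8] } temp, cmpxchg on i64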
diff --git a/clang/test/Frontend/rewrite-includes-bom.c b/clang/test/Frontend/rewrite-includes-bom.c
index caa431a..27bf470 100644
--- a/clang/test/Frontend/rewrite-includes-bom.c
+++ b/clang/test/Frontend/rewrite-includes-bom.c
@@ -1,8 +1,8 @@
-// RUN: grep -q $'^\xEF\xBB\xBF' %S/Inputs/rewrite-includes-bom.h
+// RUN: cat %S/Inputs/rewrite-includes-bom.h | od -t x1 | grep -q 'ef\s*bb\s*bf'
// RUN: %clang_cc1 -E -frewrite-includes -I %S/Inputs %s -o %t.c
-// RUN: ! grep -q $'\xEF\xBB\xBF' %t.c
+// RUN: cat %t.c | od -t x1 | not grep -q 'ef\s*bb\s*bf'
// RUN: %clang_cc1 -fsyntax-only -verify %t.c
// expected-no-diagnostics
-// REQUIRES: shell
+// UNSUPPORTED: system-windows
#include "rewrite-includes-bom.h"
diff --git a/clang/test/Headers/wasm.c b/clang/test/Headers/wasm.c
index 7f427ca..03f20c5 100644
--- a/clang/test/Headers/wasm.c
+++ b/clang/test/Headers/wasm.c
@@ -612,7 +612,7 @@ v128_t test_f64x2_const_splat(void) {
return wasm_f64x2_const_splat(42);
}
-// CHECK-LABEL: define hidden <4 x i32> @test_i8x16_splat(
+// CHECK-LABEL: define hidden noundef <4 x i32> @test_i8x16_splat(
// CHECK-SAME: i8 noundef signext [[A:%.*]]) local_unnamed_addr #[[ATTR2]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[VECINIT_I:%.*]] = insertelement <16 x i8> poison, i8 [[A]], i64 0
@@ -624,7 +624,7 @@ v128_t test_i8x16_splat(int8_t a) {
return wasm_i8x16_splat(a);
}
-// CHECK-LABEL: define hidden <4 x i32> @test_u8x16_splat(
+// CHECK-LABEL: define hidden noundef <4 x i32> @test_u8x16_splat(
// CHECK-SAME: i8 noundef zeroext [[A:%.*]]) local_unnamed_addr #[[ATTR2]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[VECINIT_I:%.*]] = insertelement <16 x i8> poison, i8 [[A]], i64 0
@@ -682,7 +682,7 @@ v128_t test_u8x16_replace_lane(v128_t a, uint8_t b) {
return wasm_u8x16_replace_lane(a, 15, b);
}
-// CHECK-LABEL: define hidden <4 x i32> @test_i16x8_splat(
+// CHECK-LABEL: define hidden noundef <4 x i32> @test_i16x8_splat(
// CHECK-SAME: i16 noundef signext [[A:%.*]]) local_unnamed_addr #[[ATTR2]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[VECINIT_I:%.*]] = insertelement <8 x i16> poison, i16 [[A]], i64 0
@@ -694,7 +694,7 @@ v128_t test_i16x8_splat(int16_t a) {
return wasm_i16x8_splat(a);
}
-// CHECK-LABEL: define hidden <4 x i32> @test_u16x8_splat(
+// CHECK-LABEL: define hidden noundef <4 x i32> @test_u16x8_splat(
// CHECK-SAME: i16 noundef zeroext [[A:%.*]]) local_unnamed_addr #[[ATTR2]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[VECINIT_I:%.*]] = insertelement <8 x i16> poison, i16 [[A]], i64 0
@@ -752,7 +752,7 @@ v128_t test_u16x8_replace_lane(v128_t a, uint16_t b) {
return wasm_u16x8_replace_lane(a, 7, b);
}
-// CHECK-LABEL: define hidden <4 x i32> @test_i32x4_splat(
+// CHECK-LABEL: define hidden noundef <4 x i32> @test_i32x4_splat(
// CHECK-SAME: i32 noundef [[A:%.*]]) local_unnamed_addr #[[ATTR2]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[VECINIT_I:%.*]] = insertelement <4 x i32> poison, i32 [[A]], i64 0
@@ -763,7 +763,7 @@ v128_t test_i32x4_splat(int32_t a) {
return wasm_i32x4_splat(a);
}
-// CHECK-LABEL: define hidden <4 x i32> @test_u32x4_splat(
+// CHECK-LABEL: define hidden noundef <4 x i32> @test_u32x4_splat(
// CHECK-SAME: i32 noundef [[A:%.*]]) local_unnamed_addr #[[ATTR2]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[VECINIT_I:%.*]] = insertelement <4 x i32> poison, i32 [[A]], i64 0
@@ -814,7 +814,7 @@ v128_t test_u32x4_replace_lane(v128_t a, uint32_t b) {
return wasm_u32x4_replace_lane(a, 3, b);
}
-// CHECK-LABEL: define hidden <4 x i32> @test_i64x2_splat(
+// CHECK-LABEL: define hidden noundef <4 x i32> @test_i64x2_splat(
// CHECK-SAME: i64 noundef [[A:%.*]]) local_unnamed_addr #[[ATTR2]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[VECINIT_I:%.*]] = insertelement <2 x i64> poison, i64 [[A]], i64 0
@@ -826,7 +826,7 @@ v128_t test_i64x2_splat(int64_t a) {
return wasm_i64x2_splat(a);
}
-// CHECK-LABEL: define hidden <4 x i32> @test_u64x2_splat(
+// CHECK-LABEL: define hidden noundef <4 x i32> @test_u64x2_splat(
// CHECK-SAME: i64 noundef [[A:%.*]]) local_unnamed_addr #[[ATTR2]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[VECINIT_I:%.*]] = insertelement <2 x i64> poison, i64 [[A]], i64 0
@@ -919,7 +919,7 @@ v128_t test_f32x4_replace_lane(v128_t a, float b) {
return wasm_f32x4_replace_lane(a, 3, b);
}
-// CHECK-LABEL: define hidden <4 x i32> @test_f64x2_splat(
+// CHECK-LABEL: define hidden noundef <4 x i32> @test_f64x2_splat(
// CHECK-SAME: double noundef [[A:%.*]]) local_unnamed_addr #[[ATTR2]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[VECINIT_I:%.*]] = insertelement <2 x double> poison, double [[A]], i64 0
diff --git a/clang/test/Lexer/minimize_source_to_dependency_directives_utf8bom.c b/clang/test/Lexer/minimize_source_to_dependency_directives_utf8bom.c
index 46aba91..6f574ac 100644
--- a/clang/test/Lexer/minimize_source_to_dependency_directives_utf8bom.c
+++ b/clang/test/Lexer/minimize_source_to_dependency_directives_utf8bom.c
@@ -1,5 +1,5 @@
// Test UTF8 BOM at start of file
-// RUN: printf '\xef\xbb\xbf' > %t.c
+// RUN: printf '\357\273\277' > %t.c
// RUN: echo '#ifdef TEST\n' >> %t.c
// RUN: echo '#include <string>' >> %t.c
// RUN: echo '#endif' >> %t.c
diff --git a/clang/test/Modules/crash-vfs-relative-incdir.m b/clang/test/Modules/crash-vfs-relative-incdir.m
index c0407f7..46c3413c 100644
--- a/clang/test/Modules/crash-vfs-relative-incdir.m
+++ b/clang/test/Modules/crash-vfs-relative-incdir.m
@@ -53,4 +53,4 @@
// RUN: cd %t
// RUN: chmod 755 crash-vfs-*.sh
-// RUN: ./crash-vfs-*.sh
+// RUN: bash ./crash-vfs-*.sh
diff --git a/clang/test/Modules/crash-vfs-run-reproducer.m b/clang/test/Modules/crash-vfs-run-reproducer.m
index fd861fe..fa06fd9 100644
--- a/clang/test/Modules/crash-vfs-run-reproducer.m
+++ b/clang/test/Modules/crash-vfs-run-reproducer.m
@@ -53,4 +53,4 @@
// RUN: cd %t
// RUN: chmod 755 crash-vfs-*.sh
-// RUN: ./crash-vfs-*.sh
+// RUN: bash ./crash-vfs-*.sh
diff --git a/clang/test/Sema/attr-print.c b/clang/test/Sema/attr-print.c
index 8492356..211e61a 100644
--- a/clang/test/Sema/attr-print.c
+++ b/clang/test/Sema/attr-print.c
@@ -35,3 +35,6 @@ int * __sptr * __ptr32 ppsp32;
// CHECK: __attribute__((availability(macos, strict, introduced=10.6)));
void f6(int) __attribute__((availability(macosx,strict,introduced=10.6)));
+
+// CHECK: _libc_intl_domainname asm("__gi__libc_intl_domainname") __attribute__((visibility("hidden")));
+extern const char _libc_intl_domainname[]; extern typeof (_libc_intl_domainname) _libc_intl_domainname asm("__gi__libc_intl_domainname") __attribute__((visibility("hidden")));
diff --git a/clang/test/SemaCXX/libstdcxx_pair_swap_hack.cpp b/clang/test/SemaCXX/libstdcxx_pair_swap_hack.cpp
index 6b8ca4f..a477571 100644
--- a/clang/test/SemaCXX/libstdcxx_pair_swap_hack.cpp
+++ b/clang/test/SemaCXX/libstdcxx_pair_swap_hack.cpp
@@ -82,13 +82,14 @@ namespace sad {
template<typename T> void swap(T &, T &);
template<typename A, typename B> struct CLASS {
- void swap(CLASS &other) noexcept(noexcept(swap(*this, other))); // expected-error {{too many arguments}} expected-note {{declared here}}
- // expected-error@-1{{uses itself}} expected-note@-1{{in instantiation of}}
+ void swap(CLASS &other) // expected-note {{declared here}}
+ noexcept(noexcept(swap(*this, other))); // expected-error {{too many arguments}}
+ // expected-error@-1{{uses itself}}
};
CLASS<int, int> pi;
- static_assert(!noexcept(pi.swap(pi)), ""); // expected-note 2{{in instantiation of exception specification for 'swap'}}
+ static_assert(!noexcept(pi.swap(pi)), ""); // expected-note {{in instantiation of exception specification for 'swap'}}
}
#endif
diff --git a/clang/test/SemaCXX/warn-implicit-unicode-conversions.cpp b/clang/test/SemaCXX/warn-implicit-unicode-conversions.cpp
index fcff006..f17f20c 100644
--- a/clang/test/SemaCXX/warn-implicit-unicode-conversions.cpp
+++ b/clang/test/SemaCXX/warn-implicit-unicode-conversions.cpp
@@ -14,7 +14,7 @@ void test(char8_t u8, char16_t u16, char32_t u32) {
c16(u32); // expected-warning {{implicit conversion from 'char32_t' to 'char16_t' may lose precision and change the meaning of the represented code unit}}
c32(u8); // expected-warning {{implicit conversion from 'char8_t' to 'char32_t' may change the meaning of the represented code unit}}
- c32(u16); // expected-warning {{implicit conversion from 'char16_t' to 'char32_t' may change the meaning of the represented code unit}}
+ c32(u16);
c32(u32);
@@ -30,7 +30,7 @@ void test(char8_t u8, char16_t u16, char32_t u32) {
c16(char32_t(0x7f));
c16(char32_t(0x80));
c16(char32_t(0xD7FF));
- c16(char32_t(0xD800)); // expected-warning {{implicit conversion from 'char32_t' to 'char16_t' changes the meaning of the code unit '<0xD800>'}}
+ c16(char32_t(0xD800));
c16(char32_t(0xE000));
c16(char32_t(U'🐉')); // expected-warning {{implicit conversion from 'char32_t' to 'char16_t' changes the meaning of the code point '🐉'}}
@@ -44,8 +44,8 @@ void test(char8_t u8, char16_t u16, char32_t u32) {
c32(char16_t(0x80));
c32(char16_t(0xD7FF));
- c32(char16_t(0xD800)); // expected-warning {{implicit conversion from 'char16_t' to 'char32_t' changes the meaning of the code unit '<0xD800>'}}
- c32(char16_t(0xDFFF)); // expected-warning {{implicit conversion from 'char16_t' to 'char32_t' changes the meaning of the code unit '<0xDFFF>'}}
+ c32(char16_t(0xD800));
+ c32(char16_t(0xDFFF));
c32(char16_t(0xE000));
c32(char16_t(u'☕'));
diff --git a/clang/test/SemaTemplate/concepts.cpp b/clang/test/SemaTemplate/concepts.cpp
index aaa20f6..5b0f3d3 100644
--- a/clang/test/SemaTemplate/concepts.cpp
+++ b/clang/test/SemaTemplate/concepts.cpp
@@ -1441,6 +1441,24 @@ void main() { Feeder<int>{}.feed<Cat<int>>(); }
}
+namespace case9 {
+
+template <typename>
+concept a = requires { requires true; };
+template <typename T>
+concept b = a<typename T::EntitySpec>;
+template <typename T>
+concept c = requires { b<T>; };
+template <typename T>
+ requires c<T>
+struct s;
+template <typename> constexpr bool f() { return true; }
+template <typename T> constexpr bool d = f<T>();
+struct s2;
+static_assert(d<s<s2>>);
+
+}
+
}
namespace GH162125 {
@@ -1514,6 +1532,31 @@ static_assert( requires {{ &f } -> C;} ); // expected-error {{reference to overl
}
+namespace GH162092 {
+
+template <typename T>
+struct vector;
+
+template <typename T, typename U>
+concept C = __is_same_as(T, U);
+
+template<class T, auto Cpt>
+concept generic_range_value = requires {
+ Cpt.template operator()<int>();
+};
+
+
+template<generic_range_value<[]<
+ C<int>
+ >() {}> T>
+void x() {}
+
+void foo() {
+ x<vector<int>>();
+}
+
+}
+
namespace GH162770 {
enum e {};
template<e> struct s {};
diff --git a/clang/test/SemaTemplate/instantiate-self.cpp b/clang/test/SemaTemplate/instantiate-self.cpp
index 4999a4a..1cdf2c6 100644
--- a/clang/test/SemaTemplate/instantiate-self.cpp
+++ b/clang/test/SemaTemplate/instantiate-self.cpp
@@ -86,7 +86,7 @@ namespace test7 {
namespace test8 {
template<typename T> struct A {
- int n = A{}.n; // expected-error {{default member initializer for 'n' uses itself}} expected-note {{instantiation of default member init}}
+ int n = A{}.n; // expected-error {{default member initializer for 'n' uses itself}}
};
A<int> ai = {}; // expected-note {{instantiation of default member init}}
}
@@ -100,7 +100,7 @@ namespace test9 {
namespace test10 {
template<typename T> struct A {
- void f() noexcept(noexcept(f())); // expected-error {{exception specification of 'f' uses itself}} expected-note {{instantiation of}}
+ void f() noexcept(noexcept(f())); // expected-error {{exception specification of 'f' uses itself}}
};
bool b = noexcept(A<int>().f()); // expected-note {{instantiation of}}
}
@@ -125,7 +125,7 @@ namespace test11 {
}
namespace test12 {
- template<typename T> int f(T t, int = f(T())) {} // expected-error {{recursive evaluation of default argument}} expected-note {{instantiation of}}
+ template<typename T> int f(T t, int = f(T())) {} // expected-error {{recursive evaluation of default argument}}
struct X {};
int q = f(X()); // expected-note {{instantiation of}}
}
@@ -171,3 +171,25 @@ namespace test13 {
A::Z<A> aza;
#endif
}
+
+namespace test14 {
+ template <class> void f();
+ template <class T, decltype(new (f<void>()) T)> T x;
+}
+
+namespace test15 {
+ template <class V> void __overload(V);
+
+ template <class, class> struct __invoke_result_impl;
+ template <class _Arg>
+ struct __invoke_result_impl<decltype(__overload(*(_Arg*)0)),
+ _Arg>;
+ struct variant {
+ template <class _Arg,
+ class = typename __invoke_result_impl<void, _Arg>::type>
+ variant(_Arg);
+ };
+ struct Matcher {
+ Matcher(variant);
+ } vec(vec); // expected-warning {{uninitialized}}
+} // namespace test15
diff --git a/clang/test/lit.cfg.py b/clang/test/lit.cfg.py
index 29088ef..52b275c 100644
--- a/clang/test/lit.cfg.py
+++ b/clang/test/lit.cfg.py
@@ -18,11 +18,22 @@ from lit.llvm.subst import FindTool
# name: The name of this test suite.
config.name = "Clang"
+# TODO: Consolidate the logic for turning on the internal shell by default for all LLVM test suites.
+# See https://github.com/llvm/llvm-project/issues/106636 for more details.
+#
+# We prefer the lit internal shell, which provides a better user experience on
+# failures and is faster, unless the user explicitly disables it with the
+# LIT_USE_INTERNAL_SHELL=0 environment variable.
+use_lit_shell = True
+lit_shell_env = os.environ.get("LIT_USE_INTERNAL_SHELL")
+if lit_shell_env:
+ use_lit_shell = lit.util.pythonize_bool(lit_shell_env)
+
# testFormat: The test format to use to interpret tests.
#
# For now we require '&&' between commands, until they get globally killed and
# the test runner updated.
-config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell)
+config.test_format = lit.formats.ShTest(execute_external=not use_lit_shell)
# suffixes: A list of file extensions to treat as test files.
config.suffixes = [
diff --git a/clang/unittests/Analysis/ExprMutationAnalyzerTest.cpp b/clang/unittests/Analysis/ExprMutationAnalyzerTest.cpp
index 95f8ae2..ef22960 100644
--- a/clang/unittests/Analysis/ExprMutationAnalyzerTest.cpp
+++ b/clang/unittests/Analysis/ExprMutationAnalyzerTest.cpp
@@ -2038,4 +2038,42 @@ TEST(ExprMutationAnalyzerTest, PointeeMutatedByConditionOperator) {
EXPECT_TRUE(isPointeeMutated(Results, AST.get()));
}
+TEST(ExprMutationAnalyzerTest, PointeeMutatedByReturn) {
+ {
+ const std::string Code = R"(
+ int * f() {
+ int *const x = nullptr;
+ return x;
+ })";
+ auto AST = buildASTFromCodeWithArgs(Code, {"-Wno-everything"});
+ auto Results =
+ match(withEnclosingCompound(declRefTo("x")), AST->getASTContext());
+ EXPECT_TRUE(isPointeeMutated(Results, AST.get()));
+ }
+ {
+ const std::string Code = R"(
+ int * f() {
+ int *const x = nullptr;
+ return x;
+ })";
+    // In C++23, the AST will have a NoOp cast here.
+ auto AST =
+ buildASTFromCodeWithArgs(Code, {"-Wno-everything", "-std=c++23"});
+ auto Results =
+ match(withEnclosingCompound(declRefTo("x")), AST->getASTContext());
+ EXPECT_TRUE(isPointeeMutated(Results, AST.get()));
+ }
+ {
+ const std::string Code = R"(
+ int const* f() {
+ int *const x = nullptr;
+ return x;
+ })";
+ auto AST = buildASTFromCodeWithArgs(Code, {"-Wno-everything"});
+ auto Results =
+ match(withEnclosingCompound(declRefTo("x")), AST->getASTContext());
+ EXPECT_FALSE(isPointeeMutated(Results, AST.get()));
+ }
+}
+
} // namespace clang
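
The new PointeeMutatedByReturn cases encode an escape rule rather than a direct write: returning a pointer to non-const int hands the caller mutable access to the pointee, so the analyzer must count the return itself as a potential mutation, whereas a pointer-to-const return type does not. The C++23 variant is identical source and only differs in that the return expression carries a NoOp implicit cast in the AST, which the matcher has to see through. A sketch of the rule being tested (hypothetical function names, not from the patch):

int *escapes(int *const x) { return x; }        // pointee counts as mutated: the caller may write through it
const int *readOnly(int *const x) { return x; } // pointee not mutated: callers only get const access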
diff --git a/clang/unittests/Format/AlignBracketsTest.cpp b/clang/unittests/Format/AlignBracketsTest.cpp
index c4380ae..ea8db51 100644
--- a/clang/unittests/Format/AlignBracketsTest.cpp
+++ b/clang/unittests/Format/AlignBracketsTest.cpp
@@ -778,6 +778,19 @@ TEST_F(AlignBracketsTest, ParenthesesAndOperandAlignment) {
Style);
}
+TEST_F(AlignBracketsTest, BlockIndentAndNamespace) {
+ auto Style = getLLVMStyleWithColumns(120);
+ Style.AllowShortNamespacesOnASingleLine = true;
+ Style.AlignAfterOpenBracket = FormatStyle::BAS_BlockIndent;
+
+ verifyNoCrash(
+ "namespace {\n"
+ "void xxxxxxxxxxxxxxxxxxxxx(nnnnn::TTTTTTTTTTTTT const *mmmm,\n"
+ " YYYYYYYYYYYYYYYYY &yyyyyyyyyyyyyy);\n"
+ "} //",
+ Style);
+}
+
} // namespace
} // namespace test
} // namespace format
diff --git a/clang/unittests/Format/ConfigParseTest.cpp b/clang/unittests/Format/ConfigParseTest.cpp
index 52f02c3..6488e38 100644
--- a/clang/unittests/Format/ConfigParseTest.cpp
+++ b/clang/unittests/Format/ConfigParseTest.cpp
@@ -176,7 +176,6 @@ TEST(ConfigParseTest, ParsesConfigurationBools) {
CHECK_PARSE_BOOL(BreakBeforeTernaryOperators);
CHECK_PARSE_BOOL(BreakStringLiterals);
CHECK_PARSE_BOOL(CompactNamespaces);
- CHECK_PARSE_BOOL(Cpp11BracedListStyle);
CHECK_PARSE_BOOL(DerivePointerAlignment);
CHECK_PARSE_BOOL_FIELD(DerivePointerAlignment, "DerivePointerBinding");
CHECK_PARSE_BOOL(DisableFormat);
@@ -1139,6 +1138,18 @@ TEST(ConfigParseTest, ParsesConfiguration) {
FormatStyle::SDS_Leave);
CHECK_PARSE("SeparateDefinitionBlocks: Never", SeparateDefinitionBlocks,
FormatStyle::SDS_Never);
+
+ CHECK_PARSE("Cpp11BracedListStyle: Block", Cpp11BracedListStyle,
+ FormatStyle::BLS_Block);
+ CHECK_PARSE("Cpp11BracedListStyle: FunctionCall", Cpp11BracedListStyle,
+ FormatStyle::BLS_FunctionCall);
+ CHECK_PARSE("Cpp11BracedListStyle: AlignFirstComment", Cpp11BracedListStyle,
+ FormatStyle::BLS_AlignFirstComment);
+ // For backward compatibility:
+ CHECK_PARSE("Cpp11BracedListStyle: false", Cpp11BracedListStyle,
+ FormatStyle::BLS_Block);
+ CHECK_PARSE("Cpp11BracedListStyle: true", Cpp11BracedListStyle,
+ FormatStyle::BLS_AlignFirstComment);
}
TEST(ConfigParseTest, ParsesConfigurationWithLanguages) {
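
Read together with the FormatTest and FormatTestComments hunks below, these parser checks document the migration of Cpp11BracedListStyle from bool to an enum: the old false spelling maps to BLS_Block, the old true to BLS_AlignFirstComment, and BLS_FunctionCall is the newly added middle mode (its comment-placement behavior is exercised in LineCommentsOnStartOfFunctionCall below). A minimal usage sketch of the migration, not taken verbatim from the patch:

FormatStyle Style = getLLVMStyle();
// Pre-patch boolean spellings still parse and are mapped for backward compatibility:
Style.Cpp11BracedListStyle = FormatStyle::BLS_Block;             // was: false
Style.Cpp11BracedListStyle = FormatStyle::BLS_AlignFirstComment; // was: true (the getLLVMStyle() default)
Style.Cpp11BracedListStyle = FormatStyle::BLS_FunctionCall;      // new, no boolean equivalent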
diff --git a/clang/unittests/Format/FormatTest.cpp b/clang/unittests/Format/FormatTest.cpp
index b9ad930..a3ad978 100644
--- a/clang/unittests/Format/FormatTest.cpp
+++ b/clang/unittests/Format/FormatTest.cpp
@@ -14363,7 +14363,7 @@ TEST_F(FormatTest, LayoutCxx11BraceInitializers) {
BreakBeforeLambdaBody);
FormatStyle ExtraSpaces = getLLVMStyle();
- ExtraSpaces.Cpp11BracedListStyle = false;
+ ExtraSpaces.Cpp11BracedListStyle = FormatStyle::BLS_Block;
ExtraSpaces.ColumnLimit = 75;
verifyFormat("vector<int> x{ 1, 2, 3, 4 };", ExtraSpaces);
verifyFormat("vector<T> x{ {}, {}, {}, {} };", ExtraSpaces);
@@ -18559,6 +18559,11 @@ TEST_F(FormatTest, AlignConsecutiveMacros) {
"#define bbbb 4\n"
"#define ccc (5)",
Style);
+
+ Style.ColumnLimit = 30;
+ verifyFormat("#define MY_FUNC(x) callMe(X)\n"
+ "#define MY_LONG_CONSTANT 17",
+ Style);
}
TEST_F(FormatTest, AlignConsecutiveAssignmentsAcrossEmptyLines) {
@@ -20346,7 +20351,7 @@ TEST_F(FormatTest, AlignConsecutiveDeclarations) {
" return 0;\n"
"}()};",
BracedAlign);
- BracedAlign.Cpp11BracedListStyle = false;
+ BracedAlign.Cpp11BracedListStyle = FormatStyle::BLS_Block;
verifyFormat("const auto result{ []() {\n"
" const auto something = 1;\n"
" return 2;\n"
@@ -21953,14 +21958,14 @@ TEST_F(FormatTest, CatchAlignArrayOfStructuresRightAlignment) {
"});",
Style);
- Style.Cpp11BracedListStyle = false;
+ Style.Cpp11BracedListStyle = FormatStyle::BLS_Block;
verifyFormat("struct test demo[] = {\n"
" { 56, 23, \"hello\" },\n"
" { -1, 93463, \"world\" },\n"
" { 7, 5, \"!!\" }\n"
"};",
Style);
- Style.Cpp11BracedListStyle = true;
+ Style.Cpp11BracedListStyle = FormatStyle::BLS_AlignFirstComment;
Style.ColumnLimit = 0;
verifyFormat(
@@ -22220,14 +22225,14 @@ TEST_F(FormatTest, CatchAlignArrayOfStructuresLeftAlignment) {
" };",
Style);
- Style.Cpp11BracedListStyle = false;
+ Style.Cpp11BracedListStyle = FormatStyle::BLS_Block;
verifyFormat("struct test demo[] = {\n"
" { 56, 23, \"hello\" },\n"
" { -1, 93463, \"world\" },\n"
" { 7, 5, \"!!\" }\n"
"};",
Style);
- Style.Cpp11BracedListStyle = true;
+ Style.Cpp11BracedListStyle = FormatStyle::BLS_AlignFirstComment;
Style.ColumnLimit = 0;
verifyFormat(
diff --git a/clang/unittests/Format/FormatTestCSharp.cpp b/clang/unittests/Format/FormatTestCSharp.cpp
index ea85ed6..d7fb15d 100644
--- a/clang/unittests/Format/FormatTestCSharp.cpp
+++ b/clang/unittests/Format/FormatTestCSharp.cpp
@@ -1194,7 +1194,7 @@ TEST_F(FormatTestCSharp, CSharpSpaces) {
Style.SpaceBeforeSquareBrackets = false;
Style.SpacesInSquareBrackets = false;
Style.SpaceBeforeCpp11BracedList = true;
- Style.Cpp11BracedListStyle = false;
+ Style.Cpp11BracedListStyle = FormatStyle::BLS_Block;
Style.SpacesInContainerLiterals = false;
Style.SpaceAfterCStyleCast = false;
diff --git a/clang/unittests/Format/FormatTestComments.cpp b/clang/unittests/Format/FormatTestComments.cpp
index 69026bc..fc80bf4 100644
--- a/clang/unittests/Format/FormatTestComments.cpp
+++ b/clang/unittests/Format/FormatTestComments.cpp
@@ -4699,6 +4699,58 @@ TEST_F(FormatTestComments, SplitCommentIntroducers) {
getLLVMStyleWithColumns(10)));
}
+TEST_F(FormatTestComments, LineCommentsOnStartOfFunctionCall) {
+ auto Style = getLLVMStyle();
+
+ EXPECT_EQ(Style.Cpp11BracedListStyle, FormatStyle::BLS_AlignFirstComment);
+ verifyFormat("Type name{// Comment\n"
+ " value};",
+ Style);
+
+ Style.Cpp11BracedListStyle = FormatStyle::BLS_Block;
+ verifyFormat("Type name{ // Comment\n"
+ " value\n"
+ "};",
+ Style);
+
+ Style.Cpp11BracedListStyle = FormatStyle::BLS_FunctionCall;
+ verifyFormat("Type name{ // Comment\n"
+ " value};",
+ Style);
+
+ verifyFormat("T foo( // Comment\n"
+ " arg);",
+ Style);
+
+ verifyFormat("T bar{ // Comment\n"
+ " arg};",
+ Style);
+
+ verifyFormat("T baz({ // Comment\n"
+ " arg});",
+ Style);
+
+ verifyFormat("T baz{{ // Comment\n"
+ " arg}};",
+ Style);
+
+ verifyFormat("T b0z(f( // Comment\n"
+ " arg));",
+ Style);
+
+ verifyFormat("T b0z(F{ // Comment\n"
+ " arg});",
+ Style);
+
+ verifyFormat("func( // Comment\n"
+ " arg);",
+ Style);
+
+ verifyFormat("func({ // Comment\n"
+ " arg});",
+ Style);
+}
+
} // end namespace
} // namespace test
} // end namespace format
diff --git a/clang/unittests/Format/FormatTestJava.cpp b/clang/unittests/Format/FormatTestJava.cpp
index 1275564..1416614b 100644
--- a/clang/unittests/Format/FormatTestJava.cpp
+++ b/clang/unittests/Format/FormatTestJava.cpp
@@ -236,7 +236,7 @@ TEST_F(FormatTestJava, ArrayInitializers) {
"};");
FormatStyle Style = getStyleWithColumns(65);
- Style.Cpp11BracedListStyle = false;
+ Style.Cpp11BracedListStyle = FormatStyle::BLS_Block;
verifyFormat(
"expected = new int[] { 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,\n"
" 100, 100, 100, 100, 100, 100, 100, 100, 100, 100 };",
diff --git a/clang/unittests/Format/FormatTestTextProto.cpp b/clang/unittests/Format/FormatTestTextProto.cpp
index fd65c9a..6cddb838 100644
--- a/clang/unittests/Format/FormatTestTextProto.cpp
+++ b/clang/unittests/Format/FormatTestTextProto.cpp
@@ -514,7 +514,7 @@ TEST_F(FormatTestTextProto, FormatsRepeatedListInitializers) {
"key: value");
auto Style = getDefaultStyle();
- Style.Cpp11BracedListStyle = true;
+ Style.Cpp11BracedListStyle = FormatStyle::BLS_AlignFirstComment;
verifyFormat("keys: [1]", Style);
}
diff --git a/clang/unittests/Format/FormatTestVerilog.cpp b/clang/unittests/Format/FormatTestVerilog.cpp
index 5c50ae6..63e2cadf 100644
--- a/clang/unittests/Format/FormatTestVerilog.cpp
+++ b/clang/unittests/Format/FormatTestVerilog.cpp
@@ -1287,7 +1287,7 @@ TEST_F(FormatTestVerilog, StringLiteral) {
getStyleWithColumns(getDefaultStyle(), 32));
// Space around braces should be correct.
auto Style = getStyleWithColumns(getDefaultStyle(), 24);
- Style.Cpp11BracedListStyle = false;
+ Style.Cpp11BracedListStyle = FormatStyle::BLS_Block;
verifyFormat(R"(x({ "xxxxxxxxxxxxxxxx ",
"xxxx" });)",
R"(x("xxxxxxxxxxxxxxxx xxxx");)", Style);
diff --git a/clang/unittests/Format/TokenAnnotatorTest.cpp b/clang/unittests/Format/TokenAnnotatorTest.cpp
index 1152466..ca99940 100644
--- a/clang/unittests/Format/TokenAnnotatorTest.cpp
+++ b/clang/unittests/Format/TokenAnnotatorTest.cpp
@@ -1129,6 +1129,11 @@ TEST_F(TokenAnnotatorTest, UnderstandsOverloadedOperators) {
ASSERT_EQ(Tokens.size(), 7u) << Tokens;
// Not TT_FunctionDeclarationName.
EXPECT_TOKEN(Tokens[3], tok::kw_operator, TT_Unknown);
+
+ Tokens = annotate("SomeAPI::operator()();");
+ ASSERT_EQ(Tokens.size(), 9u) << Tokens;
+ // Not TT_FunctionDeclarationName.
+ EXPECT_TOKEN(Tokens[2], tok::kw_operator, TT_Unknown);
}
TEST_F(TokenAnnotatorTest, OverloadedOperatorInTemplate) {
@@ -4232,6 +4237,29 @@ TEST_F(TokenAnnotatorTest, QtProperty) {
EXPECT_TOKEN(Tokens[12], tok::identifier, TT_QtProperty);
}
+TEST_F(TokenAnnotatorTest, AttributeSquares) {
+ auto Tokens = annotate("[[maybe_unused]] const int i;");
+ ASSERT_EQ(Tokens.size(), 10u) << Tokens;
+ EXPECT_TOKEN(Tokens[0], tok::l_square, TT_AttributeLSquare);
+ EXPECT_TOKEN(Tokens[1], tok::l_square, TT_Unknown);
+ EXPECT_TOKEN(Tokens[3], tok::r_square, TT_Unknown);
+ EXPECT_TOKEN(Tokens[4], tok::r_square, TT_AttributeRSquare);
+ EXPECT_TRUE(Tokens[4]->EndsCppAttributeGroup);
+
+ Tokens = annotate("[[foo([[]])]] [[maybe_unused]] int j;");
+ ASSERT_EQ(Tokens.size(), 20u) << Tokens;
+ EXPECT_TOKEN(Tokens[0], tok::l_square, TT_AttributeLSquare);
+ EXPECT_TOKEN(Tokens[1], tok::l_square, TT_Unknown);
+ EXPECT_TOKEN(Tokens[9], tok::r_square, TT_Unknown);
+ EXPECT_TOKEN(Tokens[10], tok::r_square, TT_AttributeRSquare);
+ EXPECT_FALSE(Tokens[10]->EndsCppAttributeGroup);
+ EXPECT_TOKEN(Tokens[11], tok::l_square, TT_AttributeLSquare);
+ EXPECT_TOKEN(Tokens[12], tok::l_square, TT_Unknown);
+ EXPECT_TOKEN(Tokens[14], tok::r_square, TT_Unknown);
+ EXPECT_TOKEN(Tokens[15], tok::r_square, TT_AttributeRSquare);
+ EXPECT_TRUE(Tokens[15]->EndsCppAttributeGroup);
+}
+
} // namespace
} // namespace format
} // namespace clang
diff --git a/clang/www/cxx_status.html b/clang/www/cxx_status.html
index a35e501..2618ff9 100755
--- a/clang/www/cxx_status.html
+++ b/clang/www/cxx_status.html
@@ -926,7 +926,14 @@ C++23, informally referred to as C++26.</p>
</tr>
<tr>
<td><a href="https://wg21.link/p1857r3">P1857R3</a></td>
- <td class="none" align="center">No</td>
+ <td class="partial" align="center">
+ <details>
+ <summary>Clang 21 (Partial)</summary>
+ The restriction that "[a] module directive may only appear
+ as the first preprocessing tokens in a file" is enforced
+ starting in Clang 21.
+ </details>
+ </td>
</tr>
<tr>
<td><a href="https://wg21.link/p2115r0">P2115R0</a></td>