diff options
Diffstat (limited to 'llvm/test')
60 files changed, 3567 insertions, 933 deletions
diff --git a/llvm/test/Analysis/BranchProbabilityInfo/pr22718.ll b/llvm/test/Analysis/BranchProbabilityInfo/pr22718.ll index ed851f2..67ce44e 100644 --- a/llvm/test/Analysis/BranchProbabilityInfo/pr22718.ll +++ b/llvm/test/Analysis/BranchProbabilityInfo/pr22718.ll @@ -12,14 +12,14 @@ @.str = private unnamed_addr constant [17 x i8] c"x = %lu\0Ay = %lu\0A\00", align 1 ; Function Attrs: inlinehint nounwind uwtable -define i32 @main() #0 { +define i32 @main() { entry: %retval = alloca i32, align 4 %i = alloca i64, align 8 store i32 0, ptr %retval store i64 0, ptr @y, align 8 store i64 0, ptr @x, align 8 - call void @srand(i32 422304) #3 + call void @srand(i32 422304) store i64 0, ptr %i, align 8 br label %for.cond @@ -29,7 +29,7 @@ for.cond: ; preds = %for.inc, %entry br i1 %cmp, label %for.body, label %for.end, !prof !1 for.body: ; preds = %for.cond - %call = call i32 @rand() #3 + %call = call i32 @rand() %conv = sitofp i32 %call to double %mul = fmul double %conv, 1.000000e+02 %div = fdiv double %mul, 0x41E0000000000000 @@ -65,17 +65,12 @@ for.end: ; preds = %for.cond } ; Function Attrs: nounwind -declare void @srand(i32) #1 +declare void @srand(i32) ; Function Attrs: nounwind -declare i32 @rand() #1 +declare i32 @rand() -declare i32 @printf(ptr, ...) #2 - -attributes #0 = { inlinehint nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #1 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #2 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #3 = { nounwind } +declare i32 @printf(ptr, ...) 
!llvm.ident = !{!0} diff --git a/llvm/test/Analysis/CostModel/SystemZ/intrinsic-cost-crash.ll b/llvm/test/Analysis/CostModel/SystemZ/intrinsic-cost-crash.ll index 245e8f7..058370c 100644 --- a/llvm/test/Analysis/CostModel/SystemZ/intrinsic-cost-crash.ll +++ b/llvm/test/Analysis/CostModel/SystemZ/intrinsic-cost-crash.ll @@ -23,10 +23,10 @@ %"class.llvm::Metadata.306.1758.9986.10470.10954.11438.11922.12406.12890.13374.13858.15310.15794.16278.17730.19182.21118.25958.26926.29346.29830.30314.30798.31282.31766.32250.32734.33702.36606.38058.41638" = type { i8, i8, i16, i32 } ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.end(ptr nocapture) #0 +declare void @llvm.lifetime.end(ptr nocapture) ; Function Attrs: nounwind ssp uwtable -define hidden void @fun(ptr %N, i1 %arg) #1 align 2 { +define hidden void @fun(ptr %N, i1 %arg) align 2 { ; CHECK: define entry: %NumOperands.i = getelementptr inbounds %"class.llvm::SDNode.310.1762.9990.10474.10958.11442.11926.12410.12894.13378.13862.15314.15798.16282.17734.19186.21122.25962.26930.29350.29834.30318.30802.31286.31770.32254.32738.33706.36610.38062.41642", ptr %N, i64 0, i32 8 @@ -47,8 +47,6 @@ for.body: ; preds = %for.body, %for.body br i1 %exitcond193, label %for.cond.cleanup, label %for.body } -attributes #0 = { argmemonly nounwind } -attributes #1 = { nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } !llvm.ident = !{!0} diff --git a/llvm/test/Analysis/Delinearization/constant_functions_multi_dim.ll b/llvm/test/Analysis/Delinearization/constant_functions_multi_dim.ll index 0c0fb41..891d604 100644 --- a/llvm/test/Analysis/Delinearization/constant_functions_multi_dim.ll +++ b/llvm/test/Analysis/Delinearization/constant_functions_multi_dim.ll @@ -4,7 +4,7 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" ; Function Attrs: noinline nounwind uwtable -define void @mat_mul(ptr %C, ptr %A, ptr %B, i64 %N) #0 !kernel_arg_addr_space !2 !kernel_arg_access_qual !3 !kernel_arg_type !4 !kernel_arg_base_type !4 !kernel_arg_type_qual !5 { +define void @mat_mul(ptr %C, ptr %A, ptr %B, i64 %N) !kernel_arg_addr_space !2 !kernel_arg_access_qual !3 !kernel_arg_type !4 !kernel_arg_base_type !4 !kernel_arg_type_qual !5 { ; CHECK-LABEL: 'mat_mul' ; CHECK-NEXT: Inst: %tmp = load float, ptr %arrayidx, align 4 ; CHECK-NEXT: AccessFunction: {(4 * %N * %call),+,4}<%for.inc> @@ -22,8 +22,8 @@ entry: br label %entry.split entry.split: ; preds = %entry - %call = tail call i64 @_Z13get_global_idj(i32 0) #3 - %call1 = tail call i64 @_Z13get_global_idj(i32 1) #3 + %call = tail call i64 @_Z13get_global_idj(i32 0) + %call1 = tail call i64 @_Z13get_global_idj(i32 1) %cmp1 = icmp sgt i64 %N, 0 %mul = mul nsw i64 %call, %N br i1 %cmp1, label %for.inc.lr.ph, label %for.end @@ -59,15 +59,10 @@ for.end: ; preds = %for.cond.for.end_cr } ; Function Attrs: nounwind readnone -declare i64 @_Z13get_global_idj(i32) #1 +declare i64 @_Z13get_global_idj(i32) ; Function Attrs: nounwind readnone speculatable -declare float @llvm.fmuladd.f32(float, float, float) #2 - -attributes #0 = { noinline nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" 
"disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #1 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #2 = { nounwind readnone speculatable } -attributes #3 = { nounwind readnone } +declare float @llvm.fmuladd.f32(float, float, float) !llvm.module.flags = !{!0} !llvm.ident = !{!1} diff --git a/llvm/test/Analysis/DependenceAnalysis/MIVCheckConst.ll b/llvm/test/Analysis/DependenceAnalysis/MIVCheckConst.ll index b498d7064..f5be89a 100644 --- a/llvm/test/Analysis/DependenceAnalysis/MIVCheckConst.ll +++ b/llvm/test/Analysis/DependenceAnalysis/MIVCheckConst.ll @@ -30,7 +30,7 @@ target datalayout = "e-m:e-p:32:32-i1:32-i64:64-a:0-v32:32-n16:32" %20 = type { [768 x i32] } %21 = type { [416 x i32] } -define void @test(ptr %A, ptr %B, i1 %arg, i32 %n, i32 %m) #0 align 2 { +define void @test(ptr %A, ptr %B, i1 %arg, i32 %n, i32 %m) align 2 { ; CHECK-LABEL: 'test' ; CHECK-NEXT: Src: %v1 = load i32, ptr %B, align 4 --> Dst: %v1 = load i32, ptr %B, align 4 ; CHECK-NEXT: da analyze - none! @@ -91,5 +91,3 @@ bb38: bb40: ret void } - -attributes #0 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } diff --git a/llvm/test/Analysis/DependenceAnalysis/NonCanonicalizedSubscript.ll b/llvm/test/Analysis/DependenceAnalysis/NonCanonicalizedSubscript.ll index e5d5d21e..eba017a 100644 --- a/llvm/test/Analysis/DependenceAnalysis/NonCanonicalizedSubscript.ll +++ b/llvm/test/Analysis/DependenceAnalysis/NonCanonicalizedSubscript.ll @@ -52,7 +52,7 @@ for.end: @a = global [10004 x [10004 x i32]] zeroinitializer, align 16 ; Function Attrs: nounwind uwtable -define void @coupled_miv_type_mismatch(i32 %n) #0 { +define void @coupled_miv_type_mismatch(i32 %n) { ; CHECK-LABEL: 'coupled_miv_type_mismatch' ; CHECK-NEXT: Src: %2 = load i32, ptr %arrayidx5, align 4 --> Dst: %2 = load i32, ptr %arrayidx5, align 4 ; CHECK-NEXT: da analyze - none! 
@@ -101,8 +101,6 @@ for.end13: ; preds = %for.cond ret void } -attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" } - !llvm.ident = !{!0} !0 = !{!"clang version 3.7.0"} diff --git a/llvm/test/Analysis/MemoryDependenceAnalysis/invariant.group-bug.ll b/llvm/test/Analysis/MemoryDependenceAnalysis/invariant.group-bug.ll index c11191e..5470ef9 100644 --- a/llvm/test/Analysis/MemoryDependenceAnalysis/invariant.group-bug.ll +++ b/llvm/test/Analysis/MemoryDependenceAnalysis/invariant.group-bug.ll @@ -17,13 +17,13 @@ target triple = "x86_64-grtev4-linux-gnu" %4 = type { ptr } %5 = type { i64, [8 x i8] } -define void @fail(ptr noalias sret(i1) %arg, ptr %arg1, ptr %arg2, ptr %arg3, i1 %arg4) local_unnamed_addr #0 { +define void @fail(ptr noalias sret(i1) %arg, ptr %arg1, ptr %arg2, ptr %arg3, i1 %arg4) local_unnamed_addr { ; CHECK-LABEL: @fail( ; CHECK-NEXT: bb: ; CHECK-NEXT: [[I4:%.*]] = load ptr, ptr [[ARG1:%.*]], align 8, !invariant.group [[META6:![0-9]+]] ; CHECK-NEXT: [[I5:%.*]] = getelementptr inbounds ptr, ptr [[I4]], i64 6 ; CHECK-NEXT: [[I6:%.*]] = load ptr, ptr [[I5]], align 8, !invariant.load [[META6]] -; CHECK-NEXT: [[I7:%.*]] = tail call i64 [[I6]](ptr [[ARG1]]) #[[ATTR1:[0-9]+]] +; CHECK-NEXT: [[I7:%.*]] = tail call i64 [[I6]](ptr [[ARG1]]) ; CHECK-NEXT: [[I9:%.*]] = load ptr, ptr [[ARG2:%.*]], align 8 ; CHECK-NEXT: store i8 0, ptr [[I9]], align 1 ; CHECK-NEXT: br i1 [[ARG4:%.*]], label [[BB10:%.*]], label [[BB29:%.*]] @@ -32,7 +32,7 @@ define void @fail(ptr noalias sret(i1) %arg, ptr %arg1, ptr %arg2, ptr %arg3, i1 ; CHECK-NEXT: [[I15_PRE:%.*]] = load ptr, ptr [[I14_PHI_TRANS_INSERT]], align 8, !invariant.load [[META6]] ; CHECK-NEXT: br label [[BB12:%.*]] ; CHECK: bb12: -; CHECK-NEXT: [[I16:%.*]] = call i64 [[I15_PRE]](ptr nonnull [[ARG1]], ptr null, i64 0) #[[ATTR1]] +; CHECK-NEXT: [[I16:%.*]] = call i64 [[I15_PRE]](ptr nonnull [[ARG1]], ptr null, i64 0) ; CHECK-NEXT: br i1 true, label [[BB28:%.*]], label [[BB17:%.*]] ; CHECK: bb17: ; CHECK-NEXT: br i1 true, label [[BB18:%.*]], label [[BB21:%.*]] @@ -55,7 +55,7 @@ bb: %i4 = load ptr, ptr %arg1, align 8, !invariant.group !6 %i5 = getelementptr inbounds ptr, ptr %i4, i64 6 %i6 = load ptr, ptr %i5, align 8, !invariant.load !6 - %i7 = tail call i64 %i6(ptr %arg1) #1 + %i7 = tail call i64 %i6(ptr %arg1) %i9 = load ptr, ptr %arg2, align 8 store i8 0, ptr %i9, align 1 br i1 %arg4, label %bb10, label %bb29 @@ -67,7 +67,7 @@ bb12: ; preds = %bb28, %bb10 %i13 = load ptr, ptr %arg1, align 8, !invariant.group !6 %i14 = getelementptr inbounds ptr, ptr %i13, i64 22 %i15 = load ptr, ptr %i14, align 8, !invariant.load !6 - %i16 = call i64 %i15(ptr nonnull %arg1, ptr null, i64 0) #1 + %i16 = call i64 %i15(ptr nonnull %arg1, ptr null, i64 0) br i1 %arg4, label %bb28, label %bb17 bb17: ; preds = %bb12 @@ -110,9 +110,6 @@ bb29: ; preds = %bb28, %bb ret void } -attributes #0 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="non-leaf" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+popcnt,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #1 = { 
nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="non-leaf" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+popcnt,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } - !llvm.linker.options = !{} !llvm.module.flags = !{!0, !1, !3, !4, !5} diff --git a/llvm/test/Analysis/MemorySSA/pr28880.ll b/llvm/test/Analysis/MemorySSA/pr28880.ll index 98f3261..a2690b9 100644 --- a/llvm/test/Analysis/MemorySSA/pr28880.ll +++ b/llvm/test/Analysis/MemorySSA/pr28880.ll @@ -8,7 +8,7 @@ @global.1 = external hidden unnamed_addr global double, align 8 ; Function Attrs: nounwind ssp uwtable -define hidden fastcc void @hoge(i1 %arg) unnamed_addr #0 { +define hidden fastcc void @hoge(i1 %arg) unnamed_addr { bb: br i1 %arg, label %bb1, label %bb2 @@ -45,6 +45,3 @@ bb4: ; preds = %bb3 bb6: ; preds = %bb3 unreachable } - -attributes #0 = { nounwind ssp uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="core2" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+ssse3" "unsafe-fp-math"="false" "use-soft-float"="false" } - diff --git a/llvm/test/Analysis/MemorySSA/pr39197.ll b/llvm/test/Analysis/MemorySSA/pr39197.ll index af57b3c..6be0c58 100644 --- a/llvm/test/Analysis/MemorySSA/pr39197.ll +++ b/llvm/test/Analysis/MemorySSA/pr39197.ll @@ -12,13 +12,13 @@ declare void @dummy() ; CHECK-LABEL: @main() ; Function Attrs: nounwind -define dso_local void @main() #0 { +define dso_local void @main() { call void @func_1() unreachable } ; Function Attrs: nounwind -define dso_local void @func_1() #0 { +define dso_local void @func_1() { %1 = alloca ptr, align 8 %2 = call signext i32 @func_2() %3 = icmp ne i32 %2, 0 @@ -64,45 +64,45 @@ define dso_local void @func_1() #0 { } ; Function Attrs: nounwind -declare dso_local signext i32 @func_2() #0 +declare dso_local signext i32 @func_2() ; Function Attrs: nounwind -define dso_local void @safe_sub_func_uint8_t_u_u() #0 { +define dso_local void @safe_sub_func_uint8_t_u_u() { ret void } ; Function Attrs: nounwind -define dso_local void @safe_add_func_int64_t_s_s() #0 { +define dso_local void @safe_add_func_int64_t_s_s() { ret void } ; Function Attrs: nounwind -define dso_local void @safe_rshift_func_int16_t_s_u() #0 { +define dso_local void @safe_rshift_func_int16_t_s_u() { ret void } ; Function Attrs: nounwind -define dso_local void @safe_div_func_uint8_t_u_u() #0 { +define dso_local void @safe_div_func_uint8_t_u_u() { ret void } ; Function Attrs: nounwind -define dso_local void @safe_mul_func_uint16_t_u_u() #0 { +define dso_local void @safe_mul_func_uint16_t_u_u() { ret void } ; Function Attrs: nounwind -define dso_local void @safe_mul_func_int16_t_s_s() #0 { +define dso_local void @safe_mul_func_int16_t_s_s() { ret void } ; Function Attrs: nounwind -define dso_local void @safe_div_func_int32_t_s_s() #0 { +define dso_local void @safe_div_func_int32_t_s_s() { ret void } ; Function Attrs: nounwind -define dso_local signext i16 @safe_sub_func_int16_t_s_s(i16 signext) #0 { +define dso_local signext i16 @safe_sub_func_int16_t_s_s(i16 signext) { %2 = alloca i16, align 2 store i16 %0, ptr %2, align 2, !tbaa !1 %3 = load i16, ptr %2, align 2, !tbaa !1 
@@ -113,29 +113,25 @@ define dso_local signext i16 @safe_sub_func_int16_t_s_s(i16 signext) #0 { } ; Function Attrs: nounwind -define dso_local void @safe_add_func_uint16_t_u_u() #0 { +define dso_local void @safe_add_func_uint16_t_u_u() { ret void } ; Function Attrs: nounwind -define dso_local void @safe_div_func_int8_t_s_s() #0 { +define dso_local void @safe_div_func_int8_t_s_s() { ret void } ; Function Attrs: nounwind -define dso_local void @safe_add_func_int16_t_s_s() #0 { +define dso_local void @safe_add_func_int16_t_s_s() { ret void } ; Function Attrs: nounwind -define dso_local void @safe_add_func_uint8_t_u_u() #0 { +define dso_local void @safe_add_func_uint8_t_u_u() { ret void } -attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="z13" "target-features"="+transactional-execution,+vector" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #1 = { argmemonly nounwind } -attributes #2 = { nounwind } - !llvm.ident = !{!0} !0 = !{!"clang version 8.0.0 (http://llvm.org/git/clang.git 7cda4756fc9713d98fd3513b8df172700f267bad) (http://llvm.org/git/llvm.git 199c0d32e96b646bd8cf6beeaf0f99f8a434b56a)"} diff --git a/llvm/test/Analysis/MemorySSA/pr40038.ll b/llvm/test/Analysis/MemorySSA/pr40038.ll index efdcbe5..39ea78b 100644 --- a/llvm/test/Analysis/MemorySSA/pr40038.ll +++ b/llvm/test/Analysis/MemorySSA/pr40038.ll @@ -10,21 +10,21 @@ target triple = "s390x-ibm-linux" ; Function Attrs: nounwind ; CHECK-LABEL: @main -define dso_local void @main() #0 { +define dso_local void @main() { bb: call void @func_1() unreachable } ; Function Attrs: nounwind -define dso_local void @func_1() #0 { +define dso_local void @func_1() { bb: call void @func_2() unreachable } ; Function Attrs: nounwind -define dso_local void @func_2() #0 { +define dso_local void @func_2() { bb: %tmp = alloca i32, align 4 store i32 0, ptr @g_80, align 4, !tbaa !1 @@ -68,10 +68,7 @@ bb18: ; preds = %bb12, %bb1 } ; Function Attrs: cold noreturn nounwind -declare void @llvm.trap() #1 - -attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="z13" "target-features"="+transactional-execution,+vector" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #1 = { cold noreturn nounwind } +declare void @llvm.trap() !llvm.ident = !{!0} diff --git a/llvm/test/Analysis/MemorySSA/pr43569.ll b/llvm/test/Analysis/MemorySSA/pr43569.ll index 02d074e..c81f8d4 100644 --- a/llvm/test/Analysis/MemorySSA/pr43569.ll +++ b/llvm/test/Analysis/MemorySSA/pr43569.ll @@ -10,7 +10,7 @@ target triple = "x86_64-unknown-linux-gnu" ; CHECK-LABEL: @c() ; Function Attrs: nounwind uwtable -define dso_local void @c() #0 { +define dso_local void @c() { entry: call void @llvm.instrprof.increment(ptr @__profn_c, i64 68269137, i32 3, i32 0) br label %for.cond @@ -42,8 +42,4 @@ for.end: ; preds = %for.cond1 } ; Function Attrs: nounwind -declare void @llvm.instrprof.increment(ptr, i64, i32, i32) #1 - -attributes #0 = { nounwind uwtable 
"correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #1 = { nounwind } - +declare void @llvm.instrprof.increment(ptr, i64, i32, i32) diff --git a/llvm/test/Analysis/ScalarEvolution/pr22674.ll b/llvm/test/Analysis/ScalarEvolution/pr22674.ll index 95f96ca..b2f4ae6 100644 --- a/llvm/test/Analysis/ScalarEvolution/pr22674.ll +++ b/llvm/test/Analysis/ScalarEvolution/pr22674.ll @@ -11,7 +11,7 @@ target triple = "x86_64-pc-linux-gnux32" %"class.llvm::AttributeImpl.2.1802.3601.5914.6685.7456.8227.9255.9769.10026.18508" = type <{ ptr, %"class.llvm::FoldingSetImpl::Node.1.1801.3600.5913.6684.7455.8226.9254.9768.10025.18505", i8, [3 x i8] }> ; Function Attrs: nounwind uwtable -define void @_ZNK4llvm11AttrBuilder13hasAttributesENS_12AttributeSetEy(i1 %arg) #0 align 2 { +define void @_ZNK4llvm11AttrBuilder13hasAttributesENS_12AttributeSetEy(i1 %arg) align 2 { entry: br i1 %arg, label %cond.false, label %_ZNK4llvm12AttributeSet11getNumSlotsEv.exit @@ -82,8 +82,6 @@ return: ; preds = %_ZNK4llvm9Attribute ret void } -attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } - !llvm.module.flags = !{!0} !llvm.ident = !{!1} diff --git a/llvm/test/Analysis/ScalarEvolution/scev-canonical-mode.ll b/llvm/test/Analysis/ScalarEvolution/scev-canonical-mode.ll index 3879b2e7..d9cc3e5 100644 --- a/llvm/test/Analysis/ScalarEvolution/scev-canonical-mode.ll +++ b/llvm/test/Analysis/ScalarEvolution/scev-canonical-mode.ll @@ -6,7 +6,7 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-unknown-linux-gnu" ; Function Attrs: norecurse nounwind uwtable -define void @ehF(i1 %arg) #0 { +define void @ehF(i1 %arg) { entry: br i1 %arg, label %if.then.i, label %hup.exit @@ -28,5 +28,3 @@ for.body.i: ; preds = %for.body.i, %for.bo hup.exit: ; preds = %for.body.i, %if.then.i, %entry ret void } - -attributes #0 = { norecurse nounwind uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" } diff --git a/llvm/test/Analysis/TypeBasedAliasAnalysis/PR17620.ll b/llvm/test/Analysis/TypeBasedAliasAnalysis/PR17620.ll index 67d81e7..7fb4231 100644 --- a/llvm/test/Analysis/TypeBasedAliasAnalysis/PR17620.ll +++ b/llvm/test/Analysis/TypeBasedAliasAnalysis/PR17620.ll @@ -13,7 +13,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3 %classD = type { ptr } ; Function Attrs: ssp uwtable -define ptr @test(ptr %this, ptr %p1) #0 align 2 { +define ptr @test(ptr %this, ptr %p1) align 2 { entry: ; CHECK-LABEL: @test ; CHECK: load ptr, ptr %p1, align 8, !tbaa @@ -25,10 +25,7 @@ entry: unreachable } -declare void @callee(ptr, ptr) #1 - -attributes #0 = { ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" 
"stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #1 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } +declare void @callee(ptr, ptr) !llvm.ident = !{!0} diff --git a/llvm/test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll b/llvm/test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll index 942fdf5..f9a2988 100644 --- a/llvm/test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll +++ b/llvm/test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll @@ -9,7 +9,7 @@ %struct.StructC = type { i16, %struct.StructB, i32 } %struct.StructD = type { i16, %struct.StructB, i32, i8 } -define i32 @_Z1gPjP7StructAy(ptr %s, ptr %A, i64 %count) #0 { +define i32 @_Z1gPjP7StructAy(ptr %s, ptr %A, i64 %count) { entry: ; Access to ptr and &(A->f32). ; CHECK: Function @@ -35,7 +35,7 @@ entry: ret i32 %3 } -define i32 @_Z2g2PjP7StructAy(ptr %s, ptr %A, i64 %count) #0 { +define i32 @_Z2g2PjP7StructAy(ptr %s, ptr %A, i64 %count) { entry: ; Access to ptr and &(A->f16). ; CHECK: Function @@ -60,7 +60,7 @@ entry: ret i32 %3 } -define i32 @_Z2g3P7StructAP7StructBy(ptr %A, ptr %B, i64 %count) #0 { +define i32 @_Z2g3P7StructAP7StructBy(ptr %A, ptr %B, i64 %count) { entry: ; Access to &(A->f32) and &(B->a.f32). ; CHECK: Function @@ -89,7 +89,7 @@ entry: ret i32 %3 } -define i32 @_Z2g4P7StructAP7StructBy(ptr %A, ptr %B, i64 %count) #0 { +define i32 @_Z2g4P7StructAP7StructBy(ptr %A, ptr %B, i64 %count) { entry: ; Access to &(A->f32) and &(B->a.f16). ; CHECK: Function @@ -117,7 +117,7 @@ entry: ret i32 %3 } -define i32 @_Z2g5P7StructAP7StructBy(ptr %A, ptr %B, i64 %count) #0 { +define i32 @_Z2g5P7StructAP7StructBy(ptr %A, ptr %B, i64 %count) { entry: ; Access to &(A->f32) and &(B->f32). ; CHECK: Function @@ -145,7 +145,7 @@ entry: ret i32 %3 } -define i32 @_Z2g6P7StructAP7StructBy(ptr %A, ptr %B, i64 %count) #0 { +define i32 @_Z2g6P7StructAP7StructBy(ptr %A, ptr %B, i64 %count) { entry: ; Access to &(A->f32) and &(B->a.f32_2). ; CHECK: Function @@ -174,7 +174,7 @@ entry: ret i32 %3 } -define i32 @_Z2g7P7StructAP7StructSy(ptr %A, ptr %S, i64 %count) #0 { +define i32 @_Z2g7P7StructAP7StructSy(ptr %A, ptr %S, i64 %count) { entry: ; Access to &(A->f32) and &(S->f32). ; CHECK: Function @@ -202,7 +202,7 @@ entry: ret i32 %3 } -define i32 @_Z2g8P7StructAP7StructSy(ptr %A, ptr %S, i64 %count) #0 { +define i32 @_Z2g8P7StructAP7StructSy(ptr %A, ptr %S, i64 %count) { entry: ; Access to &(A->f32) and &(S->f16). ; CHECK: Function @@ -229,7 +229,7 @@ entry: ret i32 %3 } -define i32 @_Z2g9P7StructSP8StructS2y(ptr %S, ptr %S2, i64 %count) #0 { +define i32 @_Z2g9P7StructSP8StructS2y(ptr %S, ptr %S2, i64 %count) { entry: ; Access to &(S->f32) and &(S2->f32). ; CHECK: Function @@ -257,7 +257,7 @@ entry: ret i32 %3 } -define i32 @_Z3g10P7StructSP8StructS2y(ptr %S, ptr %S2, i64 %count) #0 { +define i32 @_Z3g10P7StructSP8StructS2y(ptr %S, ptr %S2, i64 %count) { entry: ; Access to &(S->f32) and &(S2->f16). ; CHECK: Function @@ -284,7 +284,7 @@ entry: ret i32 %3 } -define i32 @_Z3g11P7StructCP7StructDy(ptr %C, ptr %D, i64 %count) #0 { +define i32 @_Z3g11P7StructCP7StructDy(ptr %C, ptr %D, i64 %count) { entry: ; Access to &(C->b.a.f32) and &(D->b.a.f32). 
; CHECK: Function @@ -318,7 +318,7 @@ entry: ret i32 %3 } -define i32 @_Z3g12P7StructCP7StructDy(ptr %C, ptr %D, i64 %count) #0 { +define i32 @_Z3g12P7StructCP7StructDy(ptr %C, ptr %D, i64 %count) { entry: ; Access to &(b1->a.f32) and &(b2->a.f32). ; CHECK: Function @@ -357,8 +357,6 @@ entry: ret i32 %5 } -attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" } - !0 = !{!1, !1, i64 0} !1 = !{!"any pointer", !2} !2 = !{!"omnipotent char", !3} diff --git a/llvm/test/CodeGen/AArch64/pr164181.ll b/llvm/test/CodeGen/AArch64/pr164181.ll new file mode 100644 index 0000000..4ec63ec --- /dev/null +++ b/llvm/test/CodeGen/AArch64/pr164181.ll @@ -0,0 +1,640 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc -verify-machineinstrs < %s | FileCheck %s + +; This test recreates a regalloc crash reported in +; https://github.com/llvm/llvm-project/issues/164181 +; When rematting an instruction we need to make sure to constrain the newly +; allocated register to both the rematted def's reg class and the use's reg +; class. + +target triple = "aarch64-unknown-linux-gnu" + +@var_32 = external global i16 +@var_35 = external global i64 +@var_39 = external global i64 +@var_46 = external global i64 +@var_50 = external global i32 + +define void @f(i1 %var_0, i16 %var_1, i64 %var_2, i8 %var_3, i16 %var_4, i1 %var_5, i32 %var_6, i32 %var_7, i8 %var_10, i64 %var_11, i8 %var_14, i32 %var_15, i64 %var_16, ptr %arr_3, ptr %arr_4, ptr %arr_6, ptr %arr_7, ptr %arr_12, ptr %arr_13, ptr %arr_19, i64 %mul, i64 %conv35, i64 %idxprom138.us16, i8 %0, i8 %1, ptr %invariant.gep875.us) #0 { +; CHECK-LABEL: f: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: sub sp, sp, #240 +; CHECK-NEXT: str x30, [sp, #144] // 8-byte Folded Spill +; CHECK-NEXT: stp x28, x27, [sp, #160] // 16-byte Folded Spill +; CHECK-NEXT: stp x26, x25, [sp, #176] // 16-byte Folded Spill +; CHECK-NEXT: stp x24, x23, [sp, #192] // 16-byte Folded Spill +; CHECK-NEXT: stp x22, x21, [sp, #208] // 16-byte Folded Spill +; CHECK-NEXT: stp x20, x19, [sp, #224] // 16-byte Folded Spill +; CHECK-NEXT: str w6, [sp, #20] // 4-byte Folded Spill +; CHECK-NEXT: str w4, [sp, #72] // 4-byte Folded Spill +; CHECK-NEXT: str w3, [sp, #112] // 4-byte Folded Spill +; CHECK-NEXT: str w5, [sp, #36] // 4-byte Folded Spill +; CHECK-NEXT: tbz w5, #0, .LBB0_43 +; CHECK-NEXT: // %bb.1: // %for.body41.lr.ph +; CHECK-NEXT: ldr x4, [sp, #312] +; CHECK-NEXT: ldr x14, [sp, #280] +; CHECK-NEXT: tbz w0, #0, .LBB0_42 +; CHECK-NEXT: // %bb.2: // %for.body41.us.preheader +; CHECK-NEXT: ldrb w8, [sp, #368] +; CHECK-NEXT: ldrb w12, [sp, #256] +; CHECK-NEXT: ldr w26, [sp, #264] +; CHECK-NEXT: adrp x20, :got:var_50 +; CHECK-NEXT: mov x28, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov w21, #36006 // =0x8ca6 +; CHECK-NEXT: ldr x11, [sp, #376] +; CHECK-NEXT: ldrb w13, [sp, #360] +; CHECK-NEXT: ldp x17, x16, [sp, #296] +; CHECK-NEXT: mov w22, #1 // =0x1 +; CHECK-NEXT: add x27, x14, #120 +; CHECK-NEXT: ldr x18, [sp, #288] +; CHECK-NEXT: ldr x7, [sp, #272] +; CHECK-NEXT: ldr x5, [sp, #248] +; CHECK-NEXT: mov x10, xzr +; CHECK-NEXT: mov w23, wzr +; CHECK-NEXT: mov w30, wzr +; CHECK-NEXT: ldrb w19, [sp, #240] +; CHECK-NEXT: mov w25, wzr +; CHECK-NEXT: mov x24, xzr +; CHECK-NEXT: str w8, [sp, #108] // 4-byte Folded Spill +; CHECK-NEXT: mov x3, x26 +; CHECK-NEXT: ldp x9, x8, [sp, #344] +; CHECK-NEXT: str w12, [sp, #92] // 4-byte 
Folded Spill +; CHECK-NEXT: mov w12, #1 // =0x1 +; CHECK-NEXT: bic w12, w12, w0 +; CHECK-NEXT: str w12, [sp, #76] // 4-byte Folded Spill +; CHECK-NEXT: mov w12, #48 // =0x30 +; CHECK-NEXT: str x9, [sp, #136] // 8-byte Folded Spill +; CHECK-NEXT: ldp x9, x15, [sp, #328] +; CHECK-NEXT: madd x8, x8, x12, x9 +; CHECK-NEXT: str x8, [sp, #64] // 8-byte Folded Spill +; CHECK-NEXT: add x8, x26, w26, uxtw #1 +; CHECK-NEXT: ldr x20, [x20, :got_lo12:var_50] +; CHECK-NEXT: str x26, [sp, #96] // 8-byte Folded Spill +; CHECK-NEXT: str x14, [sp, #152] // 8-byte Folded Spill +; CHECK-NEXT: lsl x6, x8, #3 +; CHECK-NEXT: add x8, x14, #120 +; CHECK-NEXT: str x4, [sp, #24] // 8-byte Folded Spill +; CHECK-NEXT: str w19, [sp, #16] // 4-byte Folded Spill +; CHECK-NEXT: str x8, [sp, #80] // 8-byte Folded Spill +; CHECK-NEXT: b .LBB0_4 +; CHECK-NEXT: .p2align 5, , 16 +; CHECK-NEXT: .LBB0_3: // in Loop: Header=BB0_4 Depth=1 +; CHECK-NEXT: ldr w19, [sp, #16] // 4-byte Folded Reload +; CHECK-NEXT: ldr x24, [sp, #40] // 8-byte Folded Reload +; CHECK-NEXT: ldr x14, [sp, #152] // 8-byte Folded Reload +; CHECK-NEXT: mov w23, #1 // =0x1 +; CHECK-NEXT: mov w30, #1 // =0x1 +; CHECK-NEXT: mov w25, w19 +; CHECK-NEXT: .LBB0_4: // %for.body41.us +; CHECK-NEXT: // =>This Loop Header: Depth=1 +; CHECK-NEXT: // Child Loop BB0_6 Depth 2 +; CHECK-NEXT: // Child Loop BB0_8 Depth 3 +; CHECK-NEXT: // Child Loop BB0_10 Depth 4 +; CHECK-NEXT: // Child Loop BB0_11 Depth 5 +; CHECK-NEXT: // Child Loop BB0_28 Depth 5 +; CHECK-NEXT: // Child Loop BB0_39 Depth 5 +; CHECK-NEXT: ldr w8, [sp, #20] // 4-byte Folded Reload +; CHECK-NEXT: mov x12, x24 +; CHECK-NEXT: str x24, [sp, #48] // 8-byte Folded Spill +; CHECK-NEXT: str w8, [x14] +; CHECK-NEXT: mov w8, #1 // =0x1 +; CHECK-NEXT: strb w19, [x14] +; CHECK-NEXT: b .LBB0_6 +; CHECK-NEXT: .p2align 5, , 16 +; CHECK-NEXT: .LBB0_5: // %for.cond.cleanup93.us +; CHECK-NEXT: // in Loop: Header=BB0_6 Depth=2 +; CHECK-NEXT: ldr w9, [sp, #36] // 4-byte Folded Reload +; CHECK-NEXT: ldr x4, [sp, #24] // 8-byte Folded Reload +; CHECK-NEXT: ldp x24, x12, [sp, #48] // 16-byte Folded Reload +; CHECK-NEXT: mov x22, xzr +; CHECK-NEXT: mov w25, wzr +; CHECK-NEXT: mov w8, wzr +; CHECK-NEXT: tbz w9, #0, .LBB0_3 +; CHECK-NEXT: .LBB0_6: // %for.body67.us +; CHECK-NEXT: // Parent Loop BB0_4 Depth=1 +; CHECK-NEXT: // => This Loop Header: Depth=2 +; CHECK-NEXT: // Child Loop BB0_8 Depth 3 +; CHECK-NEXT: // Child Loop BB0_10 Depth 4 +; CHECK-NEXT: // Child Loop BB0_11 Depth 5 +; CHECK-NEXT: // Child Loop BB0_28 Depth 5 +; CHECK-NEXT: // Child Loop BB0_39 Depth 5 +; CHECK-NEXT: str x12, [sp, #40] // 8-byte Folded Spill +; CHECK-NEXT: cmn x24, #30 +; CHECK-NEXT: mov x12, #-30 // =0xffffffffffffffe2 +; CHECK-NEXT: add x19, x4, w8, sxtw #2 +; CHECK-NEXT: mov x9, xzr +; CHECK-NEXT: csel x12, x24, x12, lo +; CHECK-NEXT: mov w4, w30 +; CHECK-NEXT: str x12, [sp, #56] // 8-byte Folded Spill +; CHECK-NEXT: b .LBB0_8 +; CHECK-NEXT: .p2align 5, , 16 +; CHECK-NEXT: .LBB0_7: // %for.cond.cleanup98.us +; CHECK-NEXT: // in Loop: Header=BB0_8 Depth=3 +; CHECK-NEXT: ldr w4, [sp, #72] // 4-byte Folded Reload +; CHECK-NEXT: ldr w23, [sp, #128] // 4-byte Folded Reload +; CHECK-NEXT: mov w9, #1 // =0x1 +; CHECK-NEXT: mov x22, xzr +; CHECK-NEXT: tbnz w0, #0, .LBB0_5 +; CHECK-NEXT: .LBB0_8: // %for.cond95.preheader.us +; CHECK-NEXT: // Parent Loop BB0_4 Depth=1 +; CHECK-NEXT: // Parent Loop BB0_6 Depth=2 +; CHECK-NEXT: // => This Loop Header: Depth=3 +; CHECK-NEXT: // Child Loop BB0_10 Depth 4 +; CHECK-NEXT: // Child Loop BB0_11 Depth 5 +; 
CHECK-NEXT: // Child Loop BB0_28 Depth 5 +; CHECK-NEXT: // Child Loop BB0_39 Depth 5 +; CHECK-NEXT: ldr x8, [sp, #64] // 8-byte Folded Reload +; CHECK-NEXT: mov w14, #1152 // =0x480 +; CHECK-NEXT: mov w24, #1 // =0x1 +; CHECK-NEXT: mov w12, wzr +; CHECK-NEXT: str wzr, [sp, #132] // 4-byte Folded Spill +; CHECK-NEXT: mov w30, w4 +; CHECK-NEXT: madd x8, x9, x14, x8 +; CHECK-NEXT: mov w14, #1 // =0x1 +; CHECK-NEXT: str x8, [sp, #120] // 8-byte Folded Spill +; CHECK-NEXT: add x8, x9, x9, lsl #1 +; CHECK-NEXT: lsl x26, x8, #4 +; CHECK-NEXT: sxtb w8, w23 +; CHECK-NEXT: mov w23, w25 +; CHECK-NEXT: str w8, [sp, #116] // 4-byte Folded Spill +; CHECK-NEXT: b .LBB0_10 +; CHECK-NEXT: .p2align 5, , 16 +; CHECK-NEXT: .LBB0_9: // %for.cond510.preheader.us +; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4 +; CHECK-NEXT: ldr w23, [sp, #92] // 4-byte Folded Reload +; CHECK-NEXT: mov x22, x8 +; CHECK-NEXT: ldr x3, [sp, #96] // 8-byte Folded Reload +; CHECK-NEXT: ldr x27, [sp, #80] // 8-byte Folded Reload +; CHECK-NEXT: mov x28, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov x14, xzr +; CHECK-NEXT: ldr w8, [sp, #76] // 4-byte Folded Reload +; CHECK-NEXT: tbz w8, #31, .LBB0_7 +; CHECK-NEXT: .LBB0_10: // %for.body99.us +; CHECK-NEXT: // Parent Loop BB0_4 Depth=1 +; CHECK-NEXT: // Parent Loop BB0_6 Depth=2 +; CHECK-NEXT: // Parent Loop BB0_8 Depth=3 +; CHECK-NEXT: // => This Loop Header: Depth=4 +; CHECK-NEXT: // Child Loop BB0_11 Depth 5 +; CHECK-NEXT: // Child Loop BB0_28 Depth 5 +; CHECK-NEXT: // Child Loop BB0_39 Depth 5 +; CHECK-NEXT: ldr w8, [sp, #116] // 4-byte Folded Reload +; CHECK-NEXT: and w8, w8, w8, asr #31 +; CHECK-NEXT: str w8, [sp, #128] // 4-byte Folded Spill +; CHECK-NEXT: .p2align 5, , 16 +; CHECK-NEXT: .LBB0_11: // %for.body113.us +; CHECK-NEXT: // Parent Loop BB0_4 Depth=1 +; CHECK-NEXT: // Parent Loop BB0_6 Depth=2 +; CHECK-NEXT: // Parent Loop BB0_8 Depth=3 +; CHECK-NEXT: // Parent Loop BB0_10 Depth=4 +; CHECK-NEXT: // => This Inner Loop Header: Depth=5 +; CHECK-NEXT: tbnz w0, #0, .LBB0_11 +; CHECK-NEXT: // %bb.12: // %for.cond131.preheader.us +; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4 +; CHECK-NEXT: ldr w8, [sp, #112] // 4-byte Folded Reload +; CHECK-NEXT: mov w4, #1 // =0x1 +; CHECK-NEXT: strb w8, [x18] +; CHECK-NEXT: ldr x8, [sp, #120] // 8-byte Folded Reload +; CHECK-NEXT: ldrh w8, [x8] +; CHECK-NEXT: cbnz w4, .LBB0_14 +; CHECK-NEXT: // %bb.13: // %cond.true146.us +; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4 +; CHECK-NEXT: ldrsb w4, [x27, x3] +; CHECK-NEXT: b .LBB0_15 +; CHECK-NEXT: .p2align 5, , 16 +; CHECK-NEXT: .LBB0_14: // in Loop: Header=BB0_10 Depth=4 +; CHECK-NEXT: mov w4, wzr +; CHECK-NEXT: .LBB0_15: // %cond.end154.us +; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4 +; CHECK-NEXT: mov w25, #18984 // =0x4a28 +; CHECK-NEXT: mul w8, w8, w25 +; CHECK-NEXT: and w8, w8, #0xfff8 +; CHECK-NEXT: lsl w8, w8, w4 +; CHECK-NEXT: cbz w8, .LBB0_17 +; CHECK-NEXT: // %bb.16: // %if.then.us +; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4 +; CHECK-NEXT: str wzr, [sp, #132] // 4-byte Folded Spill +; CHECK-NEXT: str wzr, [x18] +; CHECK-NEXT: .LBB0_17: // %if.end.us +; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4 +; CHECK-NEXT: ldr w8, [sp, #108] // 4-byte Folded Reload +; CHECK-NEXT: mov w4, #18984 // =0x4a28 +; CHECK-NEXT: mov w25, w23 +; CHECK-NEXT: strb w8, [x18] +; CHECK-NEXT: ldrsb w8, [x27, x3] +; CHECK-NEXT: lsl w8, w4, w8 +; CHECK-NEXT: mov x4, #-18403 // =0xffffffffffffb81d +; CHECK-NEXT: movk x4, #58909, lsl #16 +; CHECK-NEXT: cbz w8, .LBB0_19 +; CHECK-NEXT: // %bb.18: // 
%if.then.us.2 +; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4 +; CHECK-NEXT: str wzr, [sp, #132] // 4-byte Folded Spill +; CHECK-NEXT: strb wzr, [x18] +; CHECK-NEXT: .LBB0_19: // %if.then.us.5 +; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4 +; CHECK-NEXT: ldr w23, [sp, #132] // 4-byte Folded Reload +; CHECK-NEXT: mov w8, #29625 // =0x73b9 +; CHECK-NEXT: movk w8, #21515, lsl #16 +; CHECK-NEXT: cmp w23, w8 +; CHECK-NEXT: csel w23, w23, w8, lt +; CHECK-NEXT: str w23, [sp, #132] // 4-byte Folded Spill +; CHECK-NEXT: tbz w0, #0, .LBB0_21 +; CHECK-NEXT: // %bb.20: // in Loop: Header=BB0_10 Depth=4 +; CHECK-NEXT: mov w8, wzr +; CHECK-NEXT: b .LBB0_22 +; CHECK-NEXT: .p2align 5, , 16 +; CHECK-NEXT: .LBB0_21: // %cond.true146.us.7 +; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4 +; CHECK-NEXT: ldrsb w8, [x27, x3] +; CHECK-NEXT: .LBB0_22: // %cond.end154.us.7 +; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4 +; CHECK-NEXT: mov w23, #18984 // =0x4a28 +; CHECK-NEXT: mov w3, #149 // =0x95 +; CHECK-NEXT: lsl w8, w23, w8 +; CHECK-NEXT: cbz w8, .LBB0_24 +; CHECK-NEXT: // %bb.23: // %if.then.us.7 +; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4 +; CHECK-NEXT: ldr x8, [sp, #152] // 8-byte Folded Reload +; CHECK-NEXT: str wzr, [sp, #132] // 4-byte Folded Spill +; CHECK-NEXT: str wzr, [x8] +; CHECK-NEXT: .LBB0_24: // %if.end.us.7 +; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4 +; CHECK-NEXT: mov x23, xzr +; CHECK-NEXT: b .LBB0_28 +; CHECK-NEXT: .p2align 5, , 16 +; CHECK-NEXT: .LBB0_25: // %cond.true331.us +; CHECK-NEXT: // in Loop: Header=BB0_28 Depth=5 +; CHECK-NEXT: ldrsb w4, [x10] +; CHECK-NEXT: .LBB0_26: // %cond.end345.us +; CHECK-NEXT: // in Loop: Header=BB0_28 Depth=5 +; CHECK-NEXT: strh w4, [x18] +; CHECK-NEXT: mul x4, x22, x28 +; CHECK-NEXT: adrp x22, :got:var_46 +; CHECK-NEXT: mov x8, xzr +; CHECK-NEXT: ldr x22, [x22, :got_lo12:var_46] +; CHECK-NEXT: str x4, [x22] +; CHECK-NEXT: mov x4, #-18403 // =0xffffffffffffb81d +; CHECK-NEXT: movk x4, #58909, lsl #16 +; CHECK-NEXT: .LBB0_27: // %for.inc371.us +; CHECK-NEXT: // in Loop: Header=BB0_28 Depth=5 +; CHECK-NEXT: mov w22, #-18978 // =0xffffb5de +; CHECK-NEXT: orr x23, x23, #0x1 +; CHECK-NEXT: mov x24, xzr +; CHECK-NEXT: mul w12, w12, w22 +; CHECK-NEXT: mov x22, x5 +; CHECK-NEXT: tbz w0, #0, .LBB0_36 +; CHECK-NEXT: .LBB0_28: // %for.body194.us +; CHECK-NEXT: // Parent Loop BB0_4 Depth=1 +; CHECK-NEXT: // Parent Loop BB0_6 Depth=2 +; CHECK-NEXT: // Parent Loop BB0_8 Depth=3 +; CHECK-NEXT: // Parent Loop BB0_10 Depth=4 +; CHECK-NEXT: // => This Inner Loop Header: Depth=5 +; CHECK-NEXT: cbnz wzr, .LBB0_30 +; CHECK-NEXT: // %bb.29: // %if.then222.us +; CHECK-NEXT: // in Loop: Header=BB0_28 Depth=5 +; CHECK-NEXT: adrp x27, :got:var_32 +; CHECK-NEXT: ldur w8, [x19, #-12] +; CHECK-NEXT: ldr x27, [x27, :got_lo12:var_32] +; CHECK-NEXT: strh w8, [x27] +; CHECK-NEXT: sxtb w8, w25 +; CHECK-NEXT: bic w25, w8, w8, asr #31 +; CHECK-NEXT: b .LBB0_31 +; CHECK-NEXT: .p2align 5, , 16 +; CHECK-NEXT: .LBB0_30: // in Loop: Header=BB0_28 Depth=5 +; CHECK-NEXT: mov w25, wzr +; CHECK-NEXT: .LBB0_31: // %if.end239.us +; CHECK-NEXT: // in Loop: Header=BB0_28 Depth=5 +; CHECK-NEXT: strb w3, [x16] +; CHECK-NEXT: tst w13, #0xff +; CHECK-NEXT: b.eq .LBB0_33 +; CHECK-NEXT: // %bb.32: // %if.then254.us +; CHECK-NEXT: // in Loop: Header=BB0_28 Depth=5 +; CHECK-NEXT: ldrh w8, [x26, x14, lsl #1] +; CHECK-NEXT: adrp x27, :got:var_35 +; CHECK-NEXT: ldr x27, [x27, :got_lo12:var_35] +; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: csel x8, xzr, x7, eq +; CHECK-NEXT: str x8, [x27] +; CHECK-NEXT: 
strh w1, [x17] +; CHECK-NEXT: .LBB0_33: // %if.end282.us +; CHECK-NEXT: // in Loop: Header=BB0_28 Depth=5 +; CHECK-NEXT: orr x27, x24, x4 +; CHECK-NEXT: adrp x8, :got:var_39 +; CHECK-NEXT: str x27, [x18] +; CHECK-NEXT: ldr x8, [x8, :got_lo12:var_39] +; CHECK-NEXT: str x10, [x8] +; CHECK-NEXT: ldrb w8, [x6, x9] +; CHECK-NEXT: str x8, [x18] +; CHECK-NEXT: mov w8, #1 // =0x1 +; CHECK-NEXT: cbnz x2, .LBB0_27 +; CHECK-NEXT: // %bb.34: // %if.then327.us +; CHECK-NEXT: // in Loop: Header=BB0_28 Depth=5 +; CHECK-NEXT: cbz w8, .LBB0_25 +; CHECK-NEXT: // %bb.35: // in Loop: Header=BB0_28 Depth=5 +; CHECK-NEXT: mov w4, wzr +; CHECK-NEXT: b .LBB0_26 +; CHECK-NEXT: .p2align 5, , 16 +; CHECK-NEXT: .LBB0_36: // %for.cond376.preheader.us +; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4 +; CHECK-NEXT: mov w3, #1152 // =0x480 +; CHECK-NEXT: mov x22, xzr +; CHECK-NEXT: mov w4, wzr +; CHECK-NEXT: mov x24, x27 +; CHECK-NEXT: lsl x23, x14, #1 +; CHECK-NEXT: mov x27, #-1 // =0xffffffffffffffff +; CHECK-NEXT: madd x14, x14, x3, x11 +; CHECK-NEXT: mov w28, w30 +; CHECK-NEXT: mov w3, #-7680 // =0xffffe200 +; CHECK-NEXT: b .LBB0_39 +; CHECK-NEXT: .p2align 5, , 16 +; CHECK-NEXT: .LBB0_37: // %if.then466.us +; CHECK-NEXT: // in Loop: Header=BB0_39 Depth=5 +; CHECK-NEXT: ldr x28, [sp, #152] // 8-byte Folded Reload +; CHECK-NEXT: ldr x3, [sp, #136] // 8-byte Folded Reload +; CHECK-NEXT: sxtb w4, w4 +; CHECK-NEXT: bic w4, w4, w4, asr #31 +; CHECK-NEXT: str x3, [x28] +; CHECK-NEXT: mov w3, #-7680 // =0xffffe200 +; CHECK-NEXT: .LBB0_38: // %for.inc505.us +; CHECK-NEXT: // in Loop: Header=BB0_39 Depth=5 +; CHECK-NEXT: add x22, x22, #1 +; CHECK-NEXT: add x27, x27, #1 +; CHECK-NEXT: mov w28, wzr +; CHECK-NEXT: cmp x27, #0 +; CHECK-NEXT: b.hs .LBB0_9 +; CHECK-NEXT: .LBB0_39: // %for.body380.us +; CHECK-NEXT: // Parent Loop BB0_4 Depth=1 +; CHECK-NEXT: // Parent Loop BB0_6 Depth=2 +; CHECK-NEXT: // Parent Loop BB0_8 Depth=3 +; CHECK-NEXT: // Parent Loop BB0_10 Depth=4 +; CHECK-NEXT: // => This Inner Loop Header: Depth=5 +; CHECK-NEXT: mov w30, w28 +; CHECK-NEXT: ldrh w28, [x23] +; CHECK-NEXT: tst w0, #0x1 +; CHECK-NEXT: strh w28, [x11] +; CHECK-NEXT: csel w28, w21, w3, ne +; CHECK-NEXT: str w28, [x20] +; CHECK-NEXT: cbz x15, .LBB0_38 +; CHECK-NEXT: // %bb.40: // %if.then436.us +; CHECK-NEXT: // in Loop: Header=BB0_39 Depth=5 +; CHECK-NEXT: ldrh w28, [x14] +; CHECK-NEXT: cbnz w28, .LBB0_37 +; CHECK-NEXT: // %bb.41: // in Loop: Header=BB0_39 Depth=5 +; CHECK-NEXT: mov w4, wzr +; CHECK-NEXT: b .LBB0_38 +; CHECK-NEXT: .LBB0_42: // %for.body41 +; CHECK-NEXT: strb wzr, [x4] +; CHECK-NEXT: strb wzr, [x14] +; CHECK-NEXT: .LBB0_43: // %for.cond563.preheader +; CHECK-NEXT: ldp x20, x19, [sp, #224] // 16-byte Folded Reload +; CHECK-NEXT: ldp x22, x21, [sp, #208] // 16-byte Folded Reload +; CHECK-NEXT: ldp x24, x23, [sp, #192] // 16-byte Folded Reload +; CHECK-NEXT: ldp x26, x25, [sp, #176] // 16-byte Folded Reload +; CHECK-NEXT: ldp x28, x27, [sp, #160] // 16-byte Folded Reload +; CHECK-NEXT: ldr x30, [sp, #144] // 8-byte Folded Reload +; CHECK-NEXT: add sp, sp, #240 +; CHECK-NEXT: ret +entry: + br i1 %var_5, label %for.body41.lr.ph, label %for.cond563.preheader + +for.body41.lr.ph: ; preds = %entry + %arrayidx147 = getelementptr i8, ptr %arr_3, i64 120 + %tobool326.not = icmp eq i64 %var_2, 0 + %not353 = xor i64 0, -1 + %add538 = select i1 %var_0, i16 0, i16 1 + br i1 %var_0, label %for.body41.us, label %for.body41 + +for.body41.us: ; preds = %for.cond.cleanup93.us, %for.body41.lr.ph + %var_24.promoted9271009.us = phi i64 [ 0, 
%for.body41.lr.ph ], [ %6, %for.cond.cleanup93.us ] + %var_37.promoted9301008.us = phi i64 [ 1, %for.body41.lr.ph ], [ 0, %for.cond.cleanup93.us ] + %2 = phi i8 [ 0, %for.body41.lr.ph ], [ 1, %for.cond.cleanup93.us ] + %add4139751001.us = phi i16 [ 0, %for.body41.lr.ph ], [ 1, %for.cond.cleanup93.us ] + %3 = phi i8 [ 0, %for.body41.lr.ph ], [ %var_10, %for.cond.cleanup93.us ] + store i32 %var_6, ptr %arr_3, align 4 + store i8 %var_10, ptr %arr_3, align 1 + br label %for.body67.us + +for.body67.us: ; preds = %for.cond.cleanup93.us, %for.body41.us + %4 = phi i8 [ %3, %for.body41.us ], [ 0, %for.cond.cleanup93.us ] + %add413977.us = phi i16 [ %add4139751001.us, %for.body41.us ], [ %add413.us17, %for.cond.cleanup93.us ] + %5 = phi i8 [ %2, %for.body41.us ], [ %.sroa.speculated829.us, %for.cond.cleanup93.us ] + %conv64922.us = phi i32 [ 1, %for.body41.us ], [ 0, %for.cond.cleanup93.us ] + %6 = phi i64 [ %var_24.promoted9271009.us, %for.body41.us ], [ %.sroa.speculated832.us, %for.cond.cleanup93.us ] + %mul354903918.us = phi i64 [ %var_37.promoted9301008.us, %for.body41.us ], [ 0, %for.cond.cleanup93.us ] + %i_2.0921.us = zext i32 %var_15 to i64 + %.sroa.speculated832.us = tail call i64 @llvm.umin.i64(i64 %var_24.promoted9271009.us, i64 -30) + %sext1023 = shl i64 %i_2.0921.us, 1 + %idxprom138.us162 = ashr i64 %sext1023, 1 + %gep889.us = getelementptr [24 x i16], ptr %arr_19, i64 %idxprom138.us16 + %arrayidx149.us = getelementptr i8, ptr %arrayidx147, i64 %idxprom138.us162 + %arrayidx319.us = getelementptr [24 x i8], ptr null, i64 %idxprom138.us162 + %7 = sext i32 %conv64922.us to i64 + %8 = getelementptr i32, ptr %arr_12, i64 %7 + %arrayidx226.us = getelementptr i8, ptr %8, i64 -12 + br label %for.cond95.preheader.us + +for.cond.cleanup93.us: ; preds = %for.cond.cleanup98.us + br i1 %var_5, label %for.body67.us, label %for.body41.us + +for.cond.cleanup98.us: ; preds = %for.cond510.preheader.us + br i1 %var_0, label %for.cond.cleanup93.us, label %for.cond95.preheader.us + +for.body99.us: ; preds = %for.cond95.preheader.us, %for.cond510.preheader.us + %mul287985.us = phi i16 [ 0, %for.cond95.preheader.us ], [ %mul287.us, %for.cond510.preheader.us ] + %9 = phi i8 [ %29, %for.cond95.preheader.us ], [ %var_14, %for.cond510.preheader.us ] + %add413979.us = phi i16 [ %add413978.us, %for.cond95.preheader.us ], [ %add413.us17, %for.cond510.preheader.us ] + %10 = phi i32 [ 0, %for.cond95.preheader.us ], [ %26, %for.cond510.preheader.us ] + %mul354905.us = phi i64 [ %mul354904.us, %for.cond95.preheader.us ], [ %mul354907.us, %for.cond510.preheader.us ] + %sub283896.us = phi i64 [ 1, %for.cond95.preheader.us ], [ %sub283.us, %for.cond510.preheader.us ] + %conv96880.us = phi i64 [ 1, %for.cond95.preheader.us ], [ 0, %for.cond510.preheader.us ] + %.sroa.speculated829.us = tail call i8 @llvm.smin.i8(i8 %30, i8 0) + br label %for.body113.us + +for.body380.us: ; preds = %for.cond376.preheader.us, %for.inc505.us + %indvars.iv1018 = phi i64 [ 0, %for.cond376.preheader.us ], [ %indvars.iv.next1019, %for.inc505.us ] + %11 = phi i8 [ 0, %for.cond376.preheader.us ], [ %13, %for.inc505.us ] + %add413980.us = phi i16 [ %add413979.us, %for.cond376.preheader.us ], [ 0, %for.inc505.us ] + %12 = load i16, ptr %arrayidx384.us, align 2 + store i16 %12, ptr %invariant.gep875.us, align 2 + %add413.us17 = or i16 %add413980.us, 0 + %arrayidx416.us = getelementptr i16, ptr %arr_13, i64 %indvars.iv1018 + %conv419.us = select i1 %var_0, i32 36006, i32 -7680 + store i32 %conv419.us, ptr @var_50, align 4 + %tobool435.not.us = icmp eq 
i64 %mul, 0 + br i1 %tobool435.not.us, label %for.inc505.us, label %if.then436.us + +if.then436.us: ; preds = %for.body380.us + %.sroa.speculated817.us = tail call i8 @llvm.smax.i8(i8 %11, i8 0) + %cond464.in.us = load i16, ptr %gep876.us, align 2 + %tobool465.not.us = icmp eq i16 %cond464.in.us, 0 + br i1 %tobool465.not.us, label %for.inc505.us, label %if.then466.us + +if.then466.us: ; preds = %if.then436.us + store i64 %conv35, ptr %arr_3, align 8 + br label %for.inc505.us + +for.inc505.us: ; preds = %if.then466.us, %if.then436.us, %for.body380.us + %13 = phi i8 [ %11, %for.body380.us ], [ %.sroa.speculated817.us, %if.then466.us ], [ 0, %if.then436.us ] + %indvars.iv.next1019 = add i64 %indvars.iv1018, 1 + %cmp378.us = icmp ult i64 %indvars.iv1018, 0 + br i1 %cmp378.us, label %for.body380.us, label %for.cond510.preheader.us + +for.body194.us: ; preds = %if.end.us.7, %for.inc371.us + %indvars.iv = phi i64 [ 0, %if.end.us.7 ], [ %indvars.iv.next, %for.inc371.us ] + %mul287986.us = phi i16 [ %mul287985.us, %if.end.us.7 ], [ %mul287.us, %for.inc371.us ] + %14 = phi i8 [ %9, %if.end.us.7 ], [ %16, %for.inc371.us ] + %mul354906.us = phi i64 [ %mul354905.us, %if.end.us.7 ], [ %var_11, %for.inc371.us ] + %sub283897.us = phi i64 [ %sub283896.us, %if.end.us.7 ], [ 0, %for.inc371.us ] + %tobool221.not.us = icmp eq i32 1, 0 + br i1 %tobool221.not.us, label %if.end239.us, label %if.then222.us + +if.then222.us: ; preds = %for.body194.us + %15 = load i32, ptr %arrayidx226.us, align 4 + %conv227.us = trunc i32 %15 to i16 + store i16 %conv227.us, ptr @var_32, align 2 + %.sroa.speculated820.us = tail call i8 @llvm.smax.i8(i8 %14, i8 0) + br label %if.end239.us + +if.end239.us: ; preds = %if.then222.us, %for.body194.us + %16 = phi i8 [ %.sroa.speculated820.us, %if.then222.us ], [ 0, %for.body194.us ] + store i8 -107, ptr %arr_7, align 1 + %tobool253.not.us = icmp eq i8 %0, 0 + br i1 %tobool253.not.us, label %if.end282.us, label %if.then254.us + +if.then254.us: ; preds = %if.end239.us + %17 = load i16, ptr %arrayidx259.us, align 2 + %tobool261.not.us = icmp eq i16 %17, 0 + %conv268.us = select i1 %tobool261.not.us, i64 0, i64 %var_16 + store i64 %conv268.us, ptr @var_35, align 8 + %gep867.us = getelementptr [24 x [24 x i64]], ptr null, i64 %indvars.iv + store i16 %var_1, ptr %arr_6, align 2 + br label %if.end282.us + +if.end282.us: ; preds = %if.then254.us, %if.end239.us + %sub283.us = or i64 %sub283897.us, -434259939 + store i64 %sub283.us, ptr %arr_4, align 8 + %mul287.us = mul i16 %mul287986.us, -18978 + store i64 0, ptr @var_39, align 8 + %18 = load i8, ptr %arrayidx321.us, align 1 + %conv322.us = zext i8 %18 to i64 + store i64 %conv322.us, ptr %arr_4, align 8 + br i1 %tobool326.not, label %if.then327.us, label %for.inc371.us + +if.then327.us: ; preds = %if.end282.us + %tobool330.not.us = icmp eq i32 0, 0 + br i1 %tobool330.not.us, label %cond.end345.us, label %cond.true331.us + +cond.true331.us: ; preds = %if.then327.us + %19 = load i8, ptr null, align 1 + %20 = sext i8 %19 to i16 + br label %cond.end345.us + +cond.end345.us: ; preds = %cond.true331.us, %if.then327.us + %cond346.us = phi i16 [ %20, %cond.true331.us ], [ 0, %if.then327.us ] + store i16 %cond346.us, ptr %arr_4, align 2 + %mul354.us = mul i64 %mul354906.us, %not353 + store i64 %mul354.us, ptr @var_46, align 8 + br label %for.inc371.us + +for.inc371.us: ; preds = %cond.end345.us, %if.end282.us + %mul354907.us = phi i64 [ 1, %if.end282.us ], [ 0, %cond.end345.us ] + %indvars.iv.next = or i64 %indvars.iv, 1 + br i1 %var_0, label 
%for.body194.us, label %for.cond376.preheader.us + +cond.true146.us: ; preds = %for.cond131.preheader.us + %21 = load i8, ptr %arrayidx149.us, align 1 + %conv150.us = sext i8 %21 to i32 + br label %cond.end154.us + +cond.end154.us: ; preds = %for.cond131.preheader.us, %cond.true146.us + %cond155.us = phi i32 [ %conv150.us, %cond.true146.us ], [ 0, %for.cond131.preheader.us ] + %shl.us = shl i32 %div.us, %cond155.us + %tobool157.not.us = icmp eq i32 %shl.us, 0 + br i1 %tobool157.not.us, label %if.end.us, label %if.then.us + +if.then.us: ; preds = %cond.end154.us + store i32 0, ptr %arr_4, align 4 + br label %if.end.us + +if.end.us: ; preds = %if.then.us, %cond.end154.us + %22 = phi i32 [ 0, %if.then.us ], [ %10, %cond.end154.us ] + store i8 %1, ptr %arr_4, align 1 + call void @llvm.assume(i1 true) + %23 = load i8, ptr %arrayidx149.us, align 1 + %conv150.us.2 = sext i8 %23 to i32 + %shl.us.2 = shl i32 18984, %conv150.us.2 + %tobool157.not.us.2 = icmp eq i32 %shl.us.2, 0 + br i1 %tobool157.not.us.2, label %if.then.us.5, label %if.then.us.2 + +if.then.us.2: ; preds = %if.end.us + %.sroa.speculated826.us.2 = tail call i32 @llvm.smin.i32(i32 %10, i32 0) + store i8 0, ptr %arr_4, align 1 + br label %if.then.us.5 + +if.then.us.5: ; preds = %if.then.us.2, %if.end.us + %24 = phi i32 [ 0, %if.then.us.2 ], [ %22, %if.end.us ] + %.sroa.speculated826.us.5 = tail call i32 @llvm.smin.i32(i32 %24, i32 1410036665) + br i1 %var_0, label %cond.end154.us.7, label %cond.true146.us.7 + +cond.true146.us.7: ; preds = %if.then.us.5 + %25 = load i8, ptr %arrayidx149.us, align 1 + %conv150.us.7 = sext i8 %25 to i32 + br label %cond.end154.us.7 + +cond.end154.us.7: ; preds = %cond.true146.us.7, %if.then.us.5 + %cond155.us.7 = phi i32 [ %conv150.us.7, %cond.true146.us.7 ], [ 0, %if.then.us.5 ] + %shl.us.7 = shl i32 18984, %cond155.us.7 + %tobool157.not.us.7 = icmp eq i32 %shl.us.7, 0 + br i1 %tobool157.not.us.7, label %if.end.us.7, label %if.then.us.7 + +if.then.us.7: ; preds = %cond.end154.us.7 + store i32 0, ptr %arr_3, align 4 + br label %if.end.us.7 + +if.end.us.7: ; preds = %if.then.us.7, %cond.end154.us.7 + %26 = phi i32 [ 0, %if.then.us.7 ], [ %.sroa.speculated826.us.5, %cond.end154.us.7 ] + %arrayidx259.us = getelementptr i16, ptr %arrayidx257.us, i64 %conv96880.us + br label %for.body194.us + +for.body113.us: ; preds = %for.body113.us, %for.body99.us + br i1 %var_0, label %for.body113.us, label %for.cond131.preheader.us + +for.cond510.preheader.us: ; preds = %for.inc505.us + %cmp97.us = icmp slt i16 %add538, 0 + br i1 %cmp97.us, label %for.body99.us, label %for.cond.cleanup98.us + +for.cond376.preheader.us: ; preds = %for.inc371.us + %arrayidx384.us = getelementptr i16, ptr null, i64 %conv96880.us + %gep876.us = getelementptr [24 x [24 x i16]], ptr %invariant.gep875.us, i64 %conv96880.us + br label %for.body380.us + +for.cond131.preheader.us: ; preds = %for.body113.us + store i8 %var_3, ptr %arr_4, align 1 + %27 = load i16, ptr %gep884.us, align 2 + %28 = mul i16 18984, %27 + %div.us = zext i16 %28 to i32 + %tobool145.not.us = icmp eq i8 0, 0 + br i1 %tobool145.not.us, label %cond.end154.us, label %cond.true146.us + +for.cond95.preheader.us: ; preds = %for.cond.cleanup98.us, %for.body67.us + %indvars.iv1021 = phi i64 [ 1, %for.cond.cleanup98.us ], [ 0, %for.body67.us ] + %29 = phi i8 [ %16, %for.cond.cleanup98.us ], [ %4, %for.body67.us ] + %add413978.us = phi i16 [ %var_4, %for.cond.cleanup98.us ], [ %add413977.us, %for.body67.us ] + %30 = phi i8 [ %.sroa.speculated829.us, %for.cond.cleanup98.us ], [ %5, 
%for.body67.us ] + %mul354904.us = phi i64 [ 0, %for.cond.cleanup98.us ], [ %mul354903918.us, %for.body67.us ] + %gep884.us = getelementptr [24 x [24 x i16]], ptr %gep889.us, i64 %indvars.iv1021 + %arrayidx321.us = getelementptr i8, ptr %arrayidx319.us, i64 %indvars.iv1021 + %arrayidx257.us = getelementptr [24 x i16], ptr null, i64 %indvars.iv1021 + br label %for.body99.us + +for.cond563.preheader: ; preds = %for.body41, %entry + ret void + +for.body41: ; preds = %for.body41.lr.ph + store i8 0, ptr %arr_12, align 1 + store i8 0, ptr %arr_3, align 1 + br label %for.cond563.preheader +} + +attributes #0 = { nounwind "frame-pointer"="non-leaf" "target-cpu"="grace" } +attributes #1 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) } +attributes #2 = { nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: write) } diff --git a/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll b/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll index 57a1e4c..ec92edb 100644 --- a/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll +++ b/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll @@ -3385,7 +3385,7 @@ declare half @llvm.canonicalize.f16(half) declare <2 x half> @llvm.canonicalize.v2f16(<2 x half>) attributes #0 = { nounwind "amdgpu-ieee"="false" } -attributes #1 = { nounwind "unsafe-fp-math"="true" "no-nans-fp-math"="true" } +attributes #1 = { nounwind "no-nans-fp-math"="true" } ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: ; GFX11NONANS-FAKE16: {{.*}} ; GFX11NONANS-TRUE16: {{.*}} diff --git a/llvm/test/CodeGen/AMDGPU/fdiv.f64.ll b/llvm/test/CodeGen/AMDGPU/fdiv.f64.ll index acb32d4..11476a6 100644 --- a/llvm/test/CodeGen/AMDGPU/fdiv.f64.ll +++ b/llvm/test/CodeGen/AMDGPU/fdiv.f64.ll @@ -127,7 +127,7 @@ define amdgpu_kernel void @s_fdiv_v4f64(ptr addrspace(1) %out, <4 x double> %num ; GCN-LABEL: {{^}}div_fast_2_x_pat_f64: ; GCN: v_mul_f64 [[MUL:v\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0.5 ; GCN: buffer_store_dwordx2 [[MUL]] -define amdgpu_kernel void @div_fast_2_x_pat_f64(ptr addrspace(1) %out) #1 { +define amdgpu_kernel void @div_fast_2_x_pat_f64(ptr addrspace(1) %out) #0 { %x = load double, ptr addrspace(1) poison %rcp = fdiv fast double %x, 2.0 store double %rcp, ptr addrspace(1) %out, align 4 @@ -139,7 +139,7 @@ define amdgpu_kernel void @div_fast_2_x_pat_f64(ptr addrspace(1) %out) #1 { ; GCN-DAG: v_mov_b32_e32 v[[K_HI:[0-9]+]], 0x3fb99999 ; GCN: v_mul_f64 [[MUL:v\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, v[[[K_LO]]:[[K_HI]]] ; GCN: buffer_store_dwordx2 [[MUL]] -define amdgpu_kernel void @div_fast_k_x_pat_f64(ptr addrspace(1) %out) #1 { +define amdgpu_kernel void @div_fast_k_x_pat_f64(ptr addrspace(1) %out) #0 { %x = load double, ptr addrspace(1) poison %rcp = fdiv fast double %x, 10.0 store double %rcp, ptr addrspace(1) %out, align 4 @@ -151,7 +151,7 @@ define amdgpu_kernel void @div_fast_k_x_pat_f64(ptr addrspace(1) %out) #1 { ; GCN-DAG: v_mov_b32_e32 v[[K_HI:[0-9]+]], 0xbfb99999 ; GCN: v_mul_f64 [[MUL:v\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, v[[[K_LO]]:[[K_HI]]] ; GCN: buffer_store_dwordx2 [[MUL]] -define amdgpu_kernel void @div_fast_neg_k_x_pat_f64(ptr addrspace(1) %out) #1 { +define amdgpu_kernel void @div_fast_neg_k_x_pat_f64(ptr addrspace(1) %out) #0 { %x = load double, ptr addrspace(1) poison %rcp = fdiv fast double %x, -10.0 store double %rcp, ptr addrspace(1) %out, align 4 @@ -159,4 +159,3 @@ define amdgpu_kernel void @div_fast_neg_k_x_pat_f64(ptr addrspace(1) %out) #1 { } attributes 
#0 = { nounwind } -attributes #1 = { nounwind "unsafe-fp-math"="true" } diff --git a/llvm/test/CodeGen/AMDGPU/fmad-formation-fmul-distribute-denormal-mode.ll b/llvm/test/CodeGen/AMDGPU/fmad-formation-fmul-distribute-denormal-mode.ll index 92eb4a6..0a266bc 100644 --- a/llvm/test/CodeGen/AMDGPU/fmad-formation-fmul-distribute-denormal-mode.ll +++ b/llvm/test/CodeGen/AMDGPU/fmad-formation-fmul-distribute-denormal-mode.ll @@ -284,4 +284,4 @@ define <2 x float> @unsafe_fast_fmul_fsub_ditribute_post_legalize(float %arg0, < ret <2 x float> %tmp1 } -attributes #0 = { "no-infs-fp-math"="true" "unsafe-fp-math"="true" } +attributes #0 = { "no-infs-fp-math"="true" } diff --git a/llvm/test/CodeGen/AMDGPU/fmed3.bf16.ll b/llvm/test/CodeGen/AMDGPU/fmed3.bf16.ll index bc85dc2..3e513de 100644 --- a/llvm/test/CodeGen/AMDGPU/fmed3.bf16.ll +++ b/llvm/test/CodeGen/AMDGPU/fmed3.bf16.ll @@ -219,8 +219,8 @@ define <2 x bfloat> @v_test_fmed3_r_i_i_v2bf16_minimumnum_maximumnum(<2 x bfloat } attributes #0 = { nounwind readnone } -attributes #1 = { nounwind "unsafe-fp-math"="false" "no-nans-fp-math"="false" } -attributes #2 = { nounwind "unsafe-fp-math"="false" "no-nans-fp-math"="true" } +attributes #1 = { nounwind "no-nans-fp-math"="false" } +attributes #2 = { nounwind "no-nans-fp-math"="true" } ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: ; GFX11: {{.*}} ; GFX11-SDAG: {{.*}} diff --git a/llvm/test/CodeGen/AMDGPU/fmed3.ll b/llvm/test/CodeGen/AMDGPU/fmed3.ll index 3145a27..60ac0b9 100644 --- a/llvm/test/CodeGen/AMDGPU/fmed3.ll +++ b/llvm/test/CodeGen/AMDGPU/fmed3.ll @@ -8905,4 +8905,4 @@ declare half @llvm.minnum.f16(half, half) #0 declare half @llvm.maxnum.f16(half, half) #0 attributes #0 = { nounwind readnone } -attributes #2 = { nounwind "unsafe-fp-math"="false" "no-nans-fp-math"="true" } +attributes #2 = { nounwind "no-nans-fp-math"="true" } diff --git a/llvm/test/CodeGen/AMDGPU/fneg-combines.legal.f16.ll b/llvm/test/CodeGen/AMDGPU/fneg-combines.legal.f16.ll index d8bbda1..69d1ee3f 100644 --- a/llvm/test/CodeGen/AMDGPU/fneg-combines.legal.f16.ll +++ b/llvm/test/CodeGen/AMDGPU/fneg-combines.legal.f16.ll @@ -159,7 +159,7 @@ declare half @llvm.amdgcn.interp.p2.f16(float, float, i32, i32, i1, i32) #0 attributes #0 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" } attributes #1 = { nounwind readnone } -attributes #2 = { nounwind "unsafe-fp-math"="true" } +attributes #2 = { nounwind } attributes #3 = { nounwind "no-signed-zeros-fp-math"="true" } attributes #4 = { nounwind "amdgpu-ieee"="false" "denormal-fp-math-f32"="preserve-sign,preserve-sign" } ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: diff --git a/llvm/test/CodeGen/AMDGPU/fneg-combines.ll b/llvm/test/CodeGen/AMDGPU/fneg-combines.ll index aaea4f7..b3202cb 100644 --- a/llvm/test/CodeGen/AMDGPU/fneg-combines.ll +++ b/llvm/test/CodeGen/AMDGPU/fneg-combines.ll @@ -8006,7 +8006,7 @@ declare float @llvm.amdgcn.interp.p2(float, float, i32, i32, i32) #0 attributes #0 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" } attributes #1 = { nounwind readnone } -attributes #2 = { nounwind "unsafe-fp-math"="true" } +attributes #2 = { nounwind } attributes #3 = { nounwind "no-signed-zeros-fp-math"="true" } ;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: ; GCN-NSZ: {{.*}} diff --git a/llvm/test/CodeGen/AMDGPU/frem.ll b/llvm/test/CodeGen/AMDGPU/frem.ll index 6f91222..d8cbdb1 100644 --- a/llvm/test/CodeGen/AMDGPU/frem.ll +++ b/llvm/test/CodeGen/AMDGPU/frem.ll @@ -2048,7 +2048,7 @@ define amdgpu_kernel void @unsafe_frem_f16(ptr addrspace(1) %out, ptr addrspace( ; GFX1200-FAKE16-NEXT: v_fmac_f16_e32 v1, v3, v2 ; GFX1200-FAKE16-NEXT: global_store_b16 v0, v1, s[0:1] ; GFX1200-FAKE16-NEXT: s_endpgm - ptr addrspace(1) %in2) #1 { + ptr addrspace(1) %in2) #0 { %gep2 = getelementptr half, ptr addrspace(1) %in2, i32 4 %r0 = load half, ptr addrspace(1) %in1, align 4 %r1 = load half, ptr addrspace(1) %gep2, align 4 @@ -3417,7 +3417,7 @@ define amdgpu_kernel void @unsafe_frem_f32(ptr addrspace(1) %out, ptr addrspace( ; GFX1200-NEXT: v_fmac_f32_e32 v1, v3, v2 ; GFX1200-NEXT: global_store_b32 v0, v1, s[0:1] ; GFX1200-NEXT: s_endpgm - ptr addrspace(1) %in2) #1 { + ptr addrspace(1) %in2) #0 { %gep2 = getelementptr float, ptr addrspace(1) %in2, i32 4 %r0 = load float, ptr addrspace(1) %in1, align 4 %r1 = load float, ptr addrspace(1) %gep2, align 4 @@ -4821,7 +4821,7 @@ define amdgpu_kernel void @unsafe_frem_f64(ptr addrspace(1) %out, ptr addrspace( ; GFX1200-NEXT: v_fma_f64 v[0:1], -v[4:5], v[2:3], v[0:1] ; GFX1200-NEXT: global_store_b64 v12, v[0:1], s[0:1] ; GFX1200-NEXT: s_endpgm - ptr addrspace(1) %in2) #1 { + ptr addrspace(1) %in2) #0 { %r0 = load double, ptr addrspace(1) %in1, align 8 %r1 = load double, ptr addrspace(1) %in2, align 8 %r2 = frem afn double %r0, %r1 @@ -18918,7 +18918,4 @@ define amdgpu_kernel void @frem_v2f64_const(ptr addrspace(1) %out) #0 { -attributes #0 = { nounwind "unsafe-fp-math"="false" "denormal-fp-math-f32"="preserve-sign,preserve-sign" } -attributes #1 = { nounwind "unsafe-fp-math"="true" "denormal-fp-math-f32"="preserve-sign,preserve-sign" } - - +attributes #0 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" } diff --git a/llvm/test/CodeGen/AMDGPU/fsqrt.f64.ll b/llvm/test/CodeGen/AMDGPU/fsqrt.f64.ll index 1b74ddf..9b97981 100644 --- a/llvm/test/CodeGen/AMDGPU/fsqrt.f64.ll +++ b/llvm/test/CodeGen/AMDGPU/fsqrt.f64.ll @@ -2870,7 +2870,7 @@ define double @v_sqrt_f64__enough_unsafe_attrs(double %x) #3 { ret double %result } -define double @v_sqrt_f64__unsafe_attr(double %x) #4 { +define double @v_sqrt_f64__unsafe_attr(double %x) { ; GFX6-SDAG-LABEL: v_sqrt_f64__unsafe_attr: ; GFX6-SDAG: ; %bb.0: ; GFX6-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -3449,7 +3449,6 @@ declare i32 @llvm.amdgcn.readfirstlane(i32) #1 attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) } attributes #1 = { convergent nounwind willreturn memory(none) } attributes #3 = { "no-nans-fp-math"="true" "no-infs-fp-math"="true" } -attributes #4 = { "unsafe-fp-math"="true" } ;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: ; GFX6: {{.*}} ; GFX8: {{.*}} diff --git a/llvm/test/CodeGen/AMDGPU/fsqrt.r600.ll b/llvm/test/CodeGen/AMDGPU/fsqrt.r600.ll index 9f19bcb..c93c077 100644 --- a/llvm/test/CodeGen/AMDGPU/fsqrt.r600.ll +++ b/llvm/test/CodeGen/AMDGPU/fsqrt.r600.ll @@ -239,4 +239,4 @@ declare <2 x float> @llvm.sqrt.v2f32(<2 x float> %in) #0 declare <4 x float> @llvm.sqrt.v4f32(<4 x float> %in) #0 attributes #0 = { nounwind readnone } -attributes #1 = { nounwind "unsafe-fp-math"="true" } +attributes #1 = { nounwind } diff --git a/llvm/test/CodeGen/AMDGPU/inline-attr.ll b/llvm/test/CodeGen/AMDGPU/inline-attr.ll index 4e93eca..c33b3344 100644 --- a/llvm/test/CodeGen/AMDGPU/inline-attr.ll +++ b/llvm/test/CodeGen/AMDGPU/inline-attr.ll @@ -36,18 +36,18 @@ entry: ret void } -attributes #0 = { nounwind "uniform-work-group-size"="false" "unsafe-fp-math"="true"} -attributes #1 = { nounwind "less-precise-fpmad"="true" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "unsafe-fp-math"="true" } +attributes #0 = { nounwind "uniform-work-group-size"="false"} +attributes #1 = { nounwind "less-precise-fpmad"="true" "no-infs-fp-math"="true" "no-nans-fp-math"="true" } ;. -; UNSAFE: attributes #[[ATTR0]] = { nounwind "uniform-work-group-size"="false" "unsafe-fp-math"="true" } -; UNSAFE: attributes #[[ATTR1]] = { nounwind "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "uniform-work-group-size"="false" "unsafe-fp-math"="true" } +; UNSAFE: attributes #[[ATTR0]] = { nounwind "uniform-work-group-size"="false" } +; UNSAFE: attributes #[[ATTR1]] = { nounwind "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "uniform-work-group-size"="false" } ;. -; NONANS: attributes #[[ATTR0]] = { nounwind "no-nans-fp-math"="true" "uniform-work-group-size"="false" "unsafe-fp-math"="true" } -; NONANS: attributes #[[ATTR1]] = { nounwind "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="true" "uniform-work-group-size"="false" "unsafe-fp-math"="true" } +; NONANS: attributes #[[ATTR0]] = { nounwind "no-nans-fp-math"="true" "uniform-work-group-size"="false" } +; NONANS: attributes #[[ATTR1]] = { nounwind "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="true" "uniform-work-group-size"="false" } ;. -; NOINFS: attributes #[[ATTR0]] = { nounwind "no-infs-fp-math"="true" "uniform-work-group-size"="false" "unsafe-fp-math"="true" } -; NOINFS: attributes #[[ATTR1]] = { nounwind "less-precise-fpmad"="false" "no-infs-fp-math"="true" "no-nans-fp-math"="false" "uniform-work-group-size"="false" "unsafe-fp-math"="true" } +; NOINFS: attributes #[[ATTR0]] = { nounwind "no-infs-fp-math"="true" "uniform-work-group-size"="false" } +; NOINFS: attributes #[[ATTR1]] = { nounwind "less-precise-fpmad"="false" "no-infs-fp-math"="true" "no-nans-fp-math"="false" "uniform-work-group-size"="false" } ;. ; UNSAFE: [[META0]] = !{} ;. 
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.add.min.max.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.add.min.max.ll new file mode 100644 index 0000000..99421d4 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.add.min.max.ll @@ -0,0 +1,191 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GCN,GFX1250-SDAG %s +; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GCN,GFX1250-GISEL %s + +declare i32 @llvm.amdgcn.add.min.i32(i32, i32, i32, i1) +declare i32 @llvm.amdgcn.add.max.i32(i32, i32, i32, i1) +declare i32 @llvm.amdgcn.add.min.u32(i32, i32, i32, i1) +declare i32 @llvm.amdgcn.add.max.u32(i32, i32, i32, i1) +declare <2 x i16> @llvm.amdgcn.pk.add.min.i16(<2 x i16>, <2 x i16>, <2 x i16>, i1) +declare <2 x i16> @llvm.amdgcn.pk.add.max.i16(<2 x i16>, <2 x i16>, <2 x i16>, i1) +declare <2 x i16> @llvm.amdgcn.pk.add.min.u16(<2 x i16>, <2 x i16>, <2 x i16>, i1) +declare <2 x i16> @llvm.amdgcn.pk.add.max.u16(<2 x i16>, <2 x i16>, <2 x i16>, i1) + +define i32 @test_add_min_i32_vvv(i32 %a, i32 %b, i32 %c) { +; GCN-LABEL: test_add_min_i32_vvv: +; GCN: ; %bb.0: +; GCN-NEXT: s_wait_loadcnt_dscnt 0x0 +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: v_add_min_i32 v0, v0, v1, v2 +; GCN-NEXT: s_set_pc_i64 s[30:31] + %ret = tail call i32 @llvm.amdgcn.add.min.i32(i32 %a, i32 %b, i32 %c, i1 0) + ret i32 %ret +} + +define i32 @test_add_min_i32_ssi_clamp(i32 inreg %a, i32 inreg %b) { +; GCN-LABEL: test_add_min_i32_ssi_clamp: +; GCN: ; %bb.0: +; GCN-NEXT: s_wait_loadcnt_dscnt 0x0 +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: v_add_min_i32 v0, s0, s1, 1 clamp +; GCN-NEXT: s_set_pc_i64 s[30:31] + %ret = tail call i32 @llvm.amdgcn.add.min.i32(i32 %a, i32 %b, i32 1, i1 1) + ret i32 %ret +} + +define i32 @test_add_min_u32_vvv(i32 %a, i32 %b, i32 %c) { +; GCN-LABEL: test_add_min_u32_vvv: +; GCN: ; %bb.0: +; GCN-NEXT: s_wait_loadcnt_dscnt 0x0 +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: v_add_min_u32 v0, v0, v1, v2 +; GCN-NEXT: s_set_pc_i64 s[30:31] + %ret = tail call i32 @llvm.amdgcn.add.min.u32(i32 %a, i32 %b, i32 %c, i1 0) + ret i32 %ret +} + +define i32 @test_add_min_u32_ssi_clamp(i32 inreg %a, i32 inreg %b) { +; GCN-LABEL: test_add_min_u32_ssi_clamp: +; GCN: ; %bb.0: +; GCN-NEXT: s_wait_loadcnt_dscnt 0x0 +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: v_add_min_u32 v0, s0, s1, 1 clamp +; GCN-NEXT: s_set_pc_i64 s[30:31] + %ret = tail call i32 @llvm.amdgcn.add.min.u32(i32 %a, i32 %b, i32 1, i1 1) + ret i32 %ret +} + +define i32 @test_add_max_i32_vvv(i32 %a, i32 %b, i32 %c) { +; GCN-LABEL: test_add_max_i32_vvv: +; GCN: ; %bb.0: +; GCN-NEXT: s_wait_loadcnt_dscnt 0x0 +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: v_add_max_i32 v0, v0, v1, v2 +; GCN-NEXT: s_set_pc_i64 s[30:31] + %ret = tail call i32 @llvm.amdgcn.add.max.i32(i32 %a, i32 %b, i32 %c, i1 0) + ret i32 %ret +} + +define i32 @test_add_max_i32_ssi_clamp(i32 inreg %a, i32 inreg %b) { +; GCN-LABEL: test_add_max_i32_ssi_clamp: +; GCN: ; %bb.0: +; GCN-NEXT: s_wait_loadcnt_dscnt 0x0 +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: v_add_max_i32 v0, s0, s1, 1 clamp +; GCN-NEXT: s_set_pc_i64 s[30:31] + %ret = tail call i32 @llvm.amdgcn.add.max.i32(i32 %a, i32 %b, i32 1, i1 1) + ret i32 %ret +} + +define i32 @test_add_max_u32_vvv(i32 %a, i32 %b, i32 %c) { +; GCN-LABEL: test_add_max_u32_vvv: +; GCN: ; %bb.0: +; GCN-NEXT: s_wait_loadcnt_dscnt 0x0 +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: v_add_max_u32 v0, v0, 
v1, v2 +; GCN-NEXT: s_set_pc_i64 s[30:31] + %ret = tail call i32 @llvm.amdgcn.add.max.u32(i32 %a, i32 %b, i32 %c, i1 0) + ret i32 %ret +} + +define i32 @test_add_max_u32_ssi_clamp(i32 inreg %a, i32 inreg %b) { +; GCN-LABEL: test_add_max_u32_ssi_clamp: +; GCN: ; %bb.0: +; GCN-NEXT: s_wait_loadcnt_dscnt 0x0 +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: v_add_max_u32 v0, s0, s1, 1 clamp +; GCN-NEXT: s_set_pc_i64 s[30:31] + %ret = tail call i32 @llvm.amdgcn.add.max.u32(i32 %a, i32 %b, i32 1, i1 1) + ret i32 %ret +} + +define <2 x i16> @test_add_min_i16_vvv(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c) { +; GCN-LABEL: test_add_min_i16_vvv: +; GCN: ; %bb.0: +; GCN-NEXT: s_wait_loadcnt_dscnt 0x0 +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: v_pk_add_min_i16 v0, v0, v1, v2 +; GCN-NEXT: s_set_pc_i64 s[30:31] + %ret = tail call <2 x i16> @llvm.amdgcn.pk.add.min.i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, i1 0) + ret <2 x i16> %ret +} + +define <2 x i16> @test_add_min_i16_ssi_clamp(<2 x i16> inreg %a, <2 x i16> inreg %b) { +; GCN-LABEL: test_add_min_i16_ssi_clamp: +; GCN: ; %bb.0: +; GCN-NEXT: s_wait_loadcnt_dscnt 0x0 +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: v_pk_add_min_i16 v0, s0, s1, 1 op_sel_hi:[1,1,0] clamp +; GCN-NEXT: s_set_pc_i64 s[30:31] + %ret = tail call <2 x i16> @llvm.amdgcn.pk.add.min.i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> <i16 1, i16 1>, i1 1) + ret <2 x i16> %ret +} + +define <2 x i16> @test_add_min_u16_vvv(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c) { +; GCN-LABEL: test_add_min_u16_vvv: +; GCN: ; %bb.0: +; GCN-NEXT: s_wait_loadcnt_dscnt 0x0 +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: v_pk_add_min_u16 v0, v0, v1, v2 +; GCN-NEXT: s_set_pc_i64 s[30:31] + %ret = tail call <2 x i16> @llvm.amdgcn.pk.add.min.u16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, i1 0) + ret <2 x i16> %ret +} + +define <2 x i16> @test_add_min_u16_ssi_clamp(<2 x i16> inreg %a, <2 x i16> inreg %b) { +; GCN-LABEL: test_add_min_u16_ssi_clamp: +; GCN: ; %bb.0: +; GCN-NEXT: s_wait_loadcnt_dscnt 0x0 +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: v_pk_add_min_u16 v0, s0, s1, 1 op_sel_hi:[1,1,0] clamp +; GCN-NEXT: s_set_pc_i64 s[30:31] + %ret = tail call <2 x i16> @llvm.amdgcn.pk.add.min.u16(<2 x i16> %a, <2 x i16> %b, <2 x i16> <i16 1, i16 1>, i1 1) + ret <2 x i16> %ret +} + +define <2 x i16> @test_add_max_i16_vvv(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c) { +; GCN-LABEL: test_add_max_i16_vvv: +; GCN: ; %bb.0: +; GCN-NEXT: s_wait_loadcnt_dscnt 0x0 +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: v_pk_add_max_i16 v0, v0, v1, v2 +; GCN-NEXT: s_set_pc_i64 s[30:31] + %ret = tail call <2 x i16> @llvm.amdgcn.pk.add.max.i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, i1 0) + ret <2 x i16> %ret +} + +define <2 x i16> @test_add_max_i16_ssi_clamp(<2 x i16> inreg %a, <2 x i16> inreg %b) { +; GCN-LABEL: test_add_max_i16_ssi_clamp: +; GCN: ; %bb.0: +; GCN-NEXT: s_wait_loadcnt_dscnt 0x0 +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: v_pk_add_max_i16 v0, s0, s1, 1 op_sel_hi:[1,1,0] clamp +; GCN-NEXT: s_set_pc_i64 s[30:31] + %ret = tail call <2 x i16> @llvm.amdgcn.pk.add.max.i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> <i16 1, i16 1>, i1 1) + ret <2 x i16> %ret +} + +define <2 x i16> @test_add_max_u16_vvv(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c) { +; GCN-LABEL: test_add_max_u16_vvv: +; GCN: ; %bb.0: +; GCN-NEXT: s_wait_loadcnt_dscnt 0x0 +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: v_pk_add_max_u16 v0, v0, v1, v2 +; GCN-NEXT: s_set_pc_i64 s[30:31] + %ret = tail call <2 x i16> @llvm.amdgcn.pk.add.max.u16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, i1 0) + ret <2 x i16> 
%ret +} + +define <2 x i16> @test_add_max_u16_ssi_clamp(<2 x i16> inreg %a, <2 x i16> inreg %b) { +; GCN-LABEL: test_add_max_u16_ssi_clamp: +; GCN: ; %bb.0: +; GCN-NEXT: s_wait_loadcnt_dscnt 0x0 +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: v_pk_add_max_u16 v0, s0, s1, 1 op_sel_hi:[1,1,0] clamp +; GCN-NEXT: s_set_pc_i64 s[30:31] + %ret = tail call <2 x i16> @llvm.amdgcn.pk.add.max.u16(<2 x i16> %a, <2 x i16> %b, <2 x i16> <i16 1, i16 1>, i1 1) + ret <2 x i16> %ret +} +;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: +; GFX1250-GISEL: {{.*}} +; GFX1250-SDAG: {{.*}} diff --git a/llvm/test/CodeGen/AMDGPU/llvm.exp2.ll b/llvm/test/CodeGen/AMDGPU/llvm.exp2.ll index 883db20..e30a586 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.exp2.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.exp2.ll @@ -1485,7 +1485,7 @@ define float @v_exp2_f32_fast(float %in) { ret float %result } -define float @v_exp2_f32_unsafe_math_attr(float %in) "unsafe-fp-math"="true" { +define float @v_exp2_f32_unsafe_math_attr(float %in) { ; SI-SDAG-LABEL: v_exp2_f32_unsafe_math_attr: ; SI-SDAG: ; %bb.0: ; SI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) diff --git a/llvm/test/CodeGen/AMDGPU/llvm.log2.ll b/llvm/test/CodeGen/AMDGPU/llvm.log2.ll index 0854134..61a777f 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.log2.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.log2.ll @@ -1907,7 +1907,7 @@ define float @v_log2_f32_fast(float %in) { ret float %result } -define float @v_log2_f32_unsafe_math_attr(float %in) "unsafe-fp-math"="true" { +define float @v_log2_f32_unsafe_math_attr(float %in) { ; SI-SDAG-LABEL: v_log2_f32_unsafe_math_attr: ; SI-SDAG: ; %bb.0: ; SI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) diff --git a/llvm/test/CodeGen/AMDGPU/minmax.ll b/llvm/test/CodeGen/AMDGPU/minmax.ll index d578d2e..60570bd 100644 --- a/llvm/test/CodeGen/AMDGPU/minmax.ll +++ b/llvm/test/CodeGen/AMDGPU/minmax.ll @@ -1296,4 +1296,4 @@ declare half @llvm.minnum.f16(half, half) declare half @llvm.maxnum.f16(half, half) declare float @llvm.minnum.f32(float, float) declare float @llvm.maxnum.f32(float, float) -attributes #0 = { nounwind "unsafe-fp-math"="false" "no-nans-fp-math"="true" } +attributes #0 = { nounwind "no-nans-fp-math"="true" } diff --git a/llvm/test/CodeGen/AMDGPU/stackguard.ll b/llvm/test/CodeGen/AMDGPU/stackguard.ll new file mode 100644 index 0000000..393686f --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/stackguard.ll @@ -0,0 +1,14 @@ +; RUN: not llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -filetype=null %s 2>&1 | FileCheck %s +; RUN: not llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -filetype=null %s 2>&1 | FileCheck %s + +; FIXME: To actually support stackguard, need to fix intrinsic to +; return pointer in any address space. + +; CHECK: error: unable to lower stackguard +define i1 @test_stackguard(ptr %p1) { + %p2 = call ptr @llvm.stackguard() + %res = icmp ne ptr %p2, %p1 + ret i1 %res +} + +declare ptr @llvm.stackguard() diff --git a/llvm/test/CodeGen/DirectX/CBufferAccess/unused.ll b/llvm/test/CodeGen/DirectX/CBufferAccess/unused.ll new file mode 100644 index 0000000..8c0d82e --- /dev/null +++ b/llvm/test/CodeGen/DirectX/CBufferAccess/unused.ll @@ -0,0 +1,13 @@ +; RUN: opt -S -dxil-cbuffer-access -mtriple=dxil--shadermodel6.3-library %s | FileCheck %s +; Check that we correctly ignore cbuffers that were nulled out by optimizations. 
+ +%__cblayout_CB = type <{ float }> +@CB.cb = local_unnamed_addr global target("dx.CBuffer", %__cblayout_CB) poison +@x = external local_unnamed_addr addrspace(2) global float, align 4 + +; CHECK-NOT: !hlsl.cbs = +!hlsl.cbs = !{!0, !1, !2} + +!0 = !{ptr @CB.cb, ptr addrspace(2) @x} +!1 = !{ptr @CB.cb, null} +!2 = !{null, null} diff --git a/llvm/test/CodeGen/LoongArch/lasx/shuffle-as-permute-and-shuffle.ll b/llvm/test/CodeGen/LoongArch/lasx/shuffle-as-permute-and-shuffle.ll index 245f764..7149cdb 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/shuffle-as-permute-and-shuffle.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/shuffle-as-permute-and-shuffle.ll @@ -32,9 +32,7 @@ define <16 x i16> @shuffle_v16i16(<16 x i16> %a) { ; CHECK: # %bb.0: ; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI2_0) ; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI2_0) -; CHECK-NEXT: xvpermi.d $xr2, $xr0, 78 -; CHECK-NEXT: xvshuf.w $xr1, $xr2, $xr0 -; CHECK-NEXT: xvori.b $xr0, $xr1, 0 +; CHECK-NEXT: xvperm.w $xr0, $xr0, $xr1 ; CHECK-NEXT: ret %shuffle = shufflevector <16 x i16> %a, <16 x i16> poison, <16 x i32> <i32 8, i32 9, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> ret <16 x i16> %shuffle @@ -55,9 +53,7 @@ define <16 x i16> @shuffle_v16i16_same_lane(<16 x i16> %a) { define <8 x i32> @shuffle_v8i32(<8 x i32> %a) { ; CHECK-LABEL: shuffle_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI4_0) -; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI4_0) -; CHECK-NEXT: xvperm.w $xr0, $xr0, $xr1 +; CHECK-NEXT: xvpermi.d $xr0, $xr0, 226 ; CHECK-NEXT: ret %shuffle = shufflevector <8 x i32> %a, <8 x i32> poison, <8 x i32> <i32 4, i32 5, i32 0, i32 1, i32 4, i32 5, i32 6, i32 7> ret <8 x i32> %shuffle @@ -93,9 +89,7 @@ define <4 x i64> @shuffle_v4i64_same_lane(<4 x i64> %a) { define <8 x float> @shuffle_v8f32(<8 x float> %a) { ; CHECK-LABEL: shuffle_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI8_0) -; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI8_0) -; CHECK-NEXT: xvperm.w $xr0, $xr0, $xr1 +; CHECK-NEXT: xvpermi.d $xr0, $xr0, 226 ; CHECK-NEXT: ret %shuffle = shufflevector <8 x float> %a, <8 x float> poison, <8 x i32> <i32 4, i32 5, i32 0, i32 1, i32 4, i32 5, i32 6, i32 7> ret <8 x float> %shuffle diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode.ll index 061b2b0..abd00b6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode.ll @@ -11,33 +11,80 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,ZVFHMIN +; RUN: llc -mtriple=riscv64 -mattr=+zvfh,+experimental-zvfbfa,+v \ +; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ +; RUN: --check-prefixes=CHECK,ZVFBFA define <vscale x 1 x bfloat> @vfadd_vv_nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %vb) { -; CHECK-LABEL: vfadd_vv_nxv1bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v9 -; CHECK-NEXT: vfwcvtbf16.f.f.v v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma -; CHECK-NEXT: vfadd.vv v9, v9, v10 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vv_nxv1bf16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v9 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v9, 
v8 +; ZVFH-NEXT: vsetvli zero, zero, e32, mf2, ta, ma +; ZVFH-NEXT: vfadd.vv v9, v9, v10 +; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vv_nxv1bf16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v9 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v9, v8 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v9, v9, v10 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9 +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv1bf16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, mf4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9 +; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma +; ZVFBFA-NEXT: vfadd.vv v9, v9, v10 +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9 +; ZVFBFA-NEXT: ret %vc = fadd <vscale x 1 x bfloat> %va, %vb ret <vscale x 1 x bfloat> %vc } define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16(<vscale x 1 x bfloat> %va, bfloat %b) { -; CHECK-LABEL: vfadd_vf_nxv1bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: fcvt.s.bf16 fa5, fa0 -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma -; CHECK-NEXT: vfadd.vf v9, v9, fa5 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vf_nxv1bf16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: fcvt.s.bf16 fa5, fa0 +; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v9, v8 +; ZVFH-NEXT: vsetvli zero, zero, e32, mf2, ta, ma +; ZVFH-NEXT: vfadd.vf v9, v9, fa5 +; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vf_nxv1bf16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fcvt.s.bf16 fa5, fa0 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v9, v8 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma +; ZVFHMIN-NEXT: vfadd.vf v9, v9, fa5 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9 +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv1bf16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: fcvt.s.bf16 fa5, fa0 +; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, mf4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma +; ZVFBFA-NEXT: vfadd.vf v9, v9, fa5 +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9 +; ZVFBFA-NEXT: ret %head = insertelement <vscale x 1 x bfloat> poison, bfloat %b, i32 0 %splat = shufflevector <vscale x 1 x bfloat> %head, <vscale x 1 x bfloat> poison, <vscale x 1 x i32> zeroinitializer %vc = fadd <vscale x 1 x bfloat> %va, %splat @@ -45,31 +92,75 @@ define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16(<vscale x 1 x bfloat> %va, bfloa } define <vscale x 2 x bfloat> @vfadd_vv_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb) { -; CHECK-LABEL: vfadd_vv_nxv2bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v9 -; CHECK-NEXT: vfwcvtbf16.f.f.v v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; CHECK-NEXT: vfadd.vv v9, v9, v10 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vv_nxv2bf16: +; ZVFH: # %bb.0: +; 
ZVFH-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v9 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v9, v8 +; ZVFH-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; ZVFH-NEXT: vfadd.vv v9, v9, v10 +; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vv_nxv2bf16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v9 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v9, v8 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v9, v9, v10 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9 +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv2bf16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, mf2, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9 +; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; ZVFBFA-NEXT: vfadd.vv v9, v9, v10 +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf2, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9 +; ZVFBFA-NEXT: ret %vc = fadd <vscale x 2 x bfloat> %va, %vb ret <vscale x 2 x bfloat> %vc } define <vscale x 2 x bfloat> @vfadd_vf_nxv2bf16(<vscale x 2 x bfloat> %va, bfloat %b) { -; CHECK-LABEL: vfadd_vf_nxv2bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: fcvt.s.bf16 fa5, fa0 -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; CHECK-NEXT: vfadd.vf v9, v9, fa5 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vf_nxv2bf16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: fcvt.s.bf16 fa5, fa0 +; ZVFH-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v9, v8 +; ZVFH-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; ZVFH-NEXT: vfadd.vf v9, v9, fa5 +; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vf_nxv2bf16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fcvt.s.bf16 fa5, fa0 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v9, v8 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfadd.vf v9, v9, fa5 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9 +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv2bf16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: fcvt.s.bf16 fa5, fa0 +; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, mf2, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; ZVFBFA-NEXT: vfadd.vf v9, v9, fa5 +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf2, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9 +; ZVFBFA-NEXT: ret %head = insertelement <vscale x 2 x bfloat> poison, bfloat %b, i32 0 %splat = shufflevector <vscale x 2 x bfloat> %head, <vscale x 2 x bfloat> poison, <vscale x 2 x i32> zeroinitializer %vc = fadd <vscale x 2 x bfloat> %va, %splat @@ -77,31 +168,75 @@ define <vscale x 2 x bfloat> @vfadd_vf_nxv2bf16(<vscale x 2 x bfloat> %va, bfloa } define <vscale x 4 x bfloat> @vfadd_vv_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %vb) { -; CHECK-LABEL: vfadd_vv_nxv4bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v9 -; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma -; CHECK-NEXT: vfadd.vv v10, v12, v10 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, 
ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vv_nxv4bf16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v9 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v12, v8 +; ZVFH-NEXT: vsetvli zero, zero, e32, m2, ta, ma +; ZVFH-NEXT: vfadd.vv v10, v12, v10 +; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v10 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vv_nxv4bf16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v9 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v12, v8 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v10, v12, v10 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v10 +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv4bf16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, m1, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9 +; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m2, ta, ma +; ZVFBFA-NEXT: vfadd.vv v10, v12, v10 +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m1, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v10 +; ZVFBFA-NEXT: ret %vc = fadd <vscale x 4 x bfloat> %va, %vb ret <vscale x 4 x bfloat> %vc } define <vscale x 4 x bfloat> @vfadd_vf_nxv4bf16(<vscale x 4 x bfloat> %va, bfloat %b) { -; CHECK-LABEL: vfadd_vf_nxv4bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: fcvt.s.bf16 fa5, fa0 -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma -; CHECK-NEXT: vfadd.vf v10, v10, fa5 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vf_nxv4bf16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: fcvt.s.bf16 fa5, fa0 +; ZVFH-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v8 +; ZVFH-NEXT: vsetvli zero, zero, e32, m2, ta, ma +; ZVFH-NEXT: vfadd.vf v10, v10, fa5 +; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v10 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vf_nxv4bf16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fcvt.s.bf16 fa5, fa0 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v8 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma +; ZVFHMIN-NEXT: vfadd.vf v10, v10, fa5 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v10 +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv4bf16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: fcvt.s.bf16 fa5, fa0 +; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, m1, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m2, ta, ma +; ZVFBFA-NEXT: vfadd.vf v10, v10, fa5 +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m1, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v10 +; ZVFBFA-NEXT: ret %head = insertelement <vscale x 4 x bfloat> poison, bfloat %b, i32 0 %splat = shufflevector <vscale x 4 x bfloat> %head, <vscale x 4 x bfloat> poison, <vscale x 4 x i32> zeroinitializer %vc = fadd <vscale x 4 x bfloat> %va, %splat @@ -109,31 +244,75 @@ define <vscale x 4 x bfloat> @vfadd_vf_nxv4bf16(<vscale x 4 x bfloat> %va, bfloa } define <vscale x 8 x bfloat> @vfadd_vv_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) { -; CHECK-LABEL: vfadd_vv_nxv8bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v10 -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8 -; 
CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma -; CHECK-NEXT: vfadd.vv v12, v16, v12 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v12 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vv_nxv8bf16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v12, v10 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v8 +; ZVFH-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFH-NEXT: vfadd.vv v12, v16, v12 +; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v12 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vv_nxv8bf16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v12, v10 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v8 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v12, v16, v12 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v12 +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv8bf16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, m2, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v10 +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFBFA-NEXT: vfadd.vv v12, v16, v12 +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m2, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12 +; ZVFBFA-NEXT: ret %vc = fadd <vscale x 8 x bfloat> %va, %vb ret <vscale x 8 x bfloat> %vc } define <vscale x 8 x bfloat> @vfadd_vf_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat %b) { -; CHECK-LABEL: vfadd_vf_nxv8bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: fcvt.s.bf16 fa5, fa0 -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma -; CHECK-NEXT: vfadd.vf v12, v12, fa5 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v12 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vf_nxv8bf16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: fcvt.s.bf16 fa5, fa0 +; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v12, v8 +; ZVFH-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFH-NEXT: vfadd.vf v12, v12, fa5 +; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v12 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vf_nxv8bf16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fcvt.s.bf16 fa5, fa0 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v12, v8 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFHMIN-NEXT: vfadd.vf v12, v12, fa5 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v12 +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv8bf16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: fcvt.s.bf16 fa5, fa0 +; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, m2, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFBFA-NEXT: vfadd.vf v12, v12, fa5 +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m2, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12 +; ZVFBFA-NEXT: ret %head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0 %splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer %vc = fadd <vscale x 8 x bfloat> %va, %splat @@ -141,16 +320,38 @@ define <vscale x 8 x bfloat> @vfadd_vf_nxv8bf16(<vscale x 8 x bfloat> %va, bfloa } define <vscale x 8 x bfloat> @vfadd_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat %b) { -; CHECK-LABEL: vfadd_fv_nxv8bf16: -; CHECK: # %bb.0: -; 
CHECK-NEXT: fcvt.s.bf16 fa5, fa0 -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma -; CHECK-NEXT: vfadd.vf v12, v12, fa5 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v12 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_fv_nxv8bf16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: fcvt.s.bf16 fa5, fa0 +; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v12, v8 +; ZVFH-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFH-NEXT: vfadd.vf v12, v12, fa5 +; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v12 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_fv_nxv8bf16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fcvt.s.bf16 fa5, fa0 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v12, v8 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFHMIN-NEXT: vfadd.vf v12, v12, fa5 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v12 +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_fv_nxv8bf16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: fcvt.s.bf16 fa5, fa0 +; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, m2, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFBFA-NEXT: vfadd.vf v12, v12, fa5 +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m2, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12 +; ZVFBFA-NEXT: ret %head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0 %splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer %vc = fadd <vscale x 8 x bfloat> %splat, %va @@ -158,31 +359,75 @@ define <vscale x 8 x bfloat> @vfadd_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloa } define <vscale x 16 x bfloat> @vfadd_vv_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %vb) { -; CHECK-LABEL: vfadd_vv_nxv16bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12 -; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfadd.vv v16, v24, v16 -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vv_nxv16bf16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v12 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v8 +; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFH-NEXT: vfadd.vv v16, v24, v16 +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vv_nxv16bf16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v12 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v8 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v16, v24, v16 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16 +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv16bf16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v24, v16 +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16 +; ZVFBFA-NEXT: ret %vc = fadd <vscale x 16 x bfloat> %va, %vb ret <vscale x 16 x bfloat> %vc } 
define <vscale x 16 x bfloat> @vfadd_vf_nxv16bf16(<vscale x 16 x bfloat> %va, bfloat %b) { -; CHECK-LABEL: vfadd_vf_nxv16bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: fcvt.s.bf16 fa5, fa0 -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfadd.vf v16, v16, fa5 -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vf_nxv16bf16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: fcvt.s.bf16 fa5, fa0 +; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v8 +; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFH-NEXT: vfadd.vf v16, v16, fa5 +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vf_nxv16bf16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fcvt.s.bf16 fa5, fa0 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v8 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfadd.vf v16, v16, fa5 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16 +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv16bf16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: fcvt.s.bf16 fa5, fa0 +; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vf v16, v16, fa5 +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16 +; ZVFBFA-NEXT: ret %head = insertelement <vscale x 16 x bfloat> poison, bfloat %b, i32 0 %splat = shufflevector <vscale x 16 x bfloat> %head, <vscale x 16 x bfloat> poison, <vscale x 16 x i32> zeroinitializer %vc = fadd <vscale x 16 x bfloat> %va, %splat @@ -190,78 +435,216 @@ define <vscale x 16 x bfloat> @vfadd_vf_nxv16bf16(<vscale x 16 x bfloat> %va, bf } define <vscale x 32 x bfloat> @vfadd_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %vb) { -; CHECK-LABEL: vfadd_vv_nxv32bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: sub sp, sp, a0 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v16 -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill -; CHECK-NEXT: vfwcvtbf16.f.f.v v0, v8 -; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v20 -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12 -; CHECK-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfadd.vv v0, v0, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v0 -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfadd.vv v16, v16, v24 -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v12, v16 -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: add sp, sp, a0 -; CHECK-NEXT: .cfi_def_cfa sp, 16 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: .cfi_def_cfa_offset 0 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vv_nxv32bf16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: addi sp, sp, -16 +; ZVFH-NEXT: .cfi_def_cfa_offset 16 +; ZVFH-NEXT: csrr a0, vlenb +; ZVFH-NEXT: slli a0, a0, 3 +; 
ZVFH-NEXT: sub sp, sp, a0 +; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v16 +; ZVFH-NEXT: addi a0, sp, 16 +; ZVFH-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill +; ZVFH-NEXT: vfwcvtbf16.f.f.v v0, v8 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v20 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v12 +; ZVFH-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload +; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFH-NEXT: vfadd.vv v0, v0, v8 +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v0 +; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFH-NEXT: vfadd.vv v16, v16, v24 +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v12, v16 +; ZVFH-NEXT: csrr a0, vlenb +; ZVFH-NEXT: slli a0, a0, 3 +; ZVFH-NEXT: add sp, sp, a0 +; ZVFH-NEXT: .cfi_def_cfa sp, 16 +; ZVFH-NEXT: addi sp, sp, 16 +; ZVFH-NEXT: .cfi_def_cfa_offset 0 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vv_nxv32bf16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: sub sp, sp, a0 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v16 +; ZVFHMIN-NEXT: addi a0, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v0, v8 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v20 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v12 +; ZVFHMIN-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v0, v0, v8 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v0 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v16, v16, v24 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v12, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv32bf16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: addi sp, sp, -16 +; ZVFBFA-NEXT: .cfi_def_cfa_offset 16 +; ZVFBFA-NEXT: csrr a0, vlenb +; ZVFBFA-NEXT: slli a0, a0, 3 +; ZVFBFA-NEXT: sub sp, sp, a0 +; ZVFBFA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v16 +; ZVFBFA-NEXT: addi a0, sp, 16 +; ZVFBFA-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill +; ZVFBFA-NEXT: vfwcvt.f.f.v v0, v8 +; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v20 +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFBFA-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v0, v0, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v0 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v16, v24 +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v12, v16 +; ZVFBFA-NEXT: csrr a0, 
vlenb +; ZVFBFA-NEXT: slli a0, a0, 3 +; ZVFBFA-NEXT: add sp, sp, a0 +; ZVFBFA-NEXT: .cfi_def_cfa sp, 16 +; ZVFBFA-NEXT: addi sp, sp, 16 +; ZVFBFA-NEXT: .cfi_def_cfa_offset 0 +; ZVFBFA-NEXT: ret %vc = fadd <vscale x 32 x bfloat> %va, %vb ret <vscale x 32 x bfloat> %vc } define <vscale x 32 x bfloat> @vfadd_vf_nxv32bf16(<vscale x 32 x bfloat> %va, bfloat %b) { -; CHECK-LABEL: vfadd_vf_nxv32bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: sub sp, sp, a0 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb -; CHECK-NEXT: fmv.x.h a0, fa0 -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8 -; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill -; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v12 -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma -; CHECK-NEXT: vmv.v.x v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v0, v8 -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12 -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfadd.vv v0, v8, v0 -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v0 -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfadd.vv v16, v24, v16 -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v12, v16 -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: add sp, sp, a0 -; CHECK-NEXT: .cfi_def_cfa sp, 16 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: .cfi_def_cfa_offset 0 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vf_nxv32bf16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: addi sp, sp, -16 +; ZVFH-NEXT: .cfi_def_cfa_offset 16 +; ZVFH-NEXT: csrr a0, vlenb +; ZVFH-NEXT: slli a0, a0, 3 +; ZVFH-NEXT: sub sp, sp, a0 +; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFH-NEXT: fmv.x.h a0, fa0 +; ZVFH-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v8 +; ZVFH-NEXT: addi a1, sp, 16 +; ZVFH-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill +; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v12 +; ZVFH-NEXT: vsetvli a1, zero, e16, m8, ta, ma +; ZVFH-NEXT: vmv.v.x v8, a0 +; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v0, v8 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v12 +; ZVFH-NEXT: addi a0, sp, 16 +; ZVFH-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload +; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFH-NEXT: vfadd.vv v0, v8, v0 +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v0 +; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFH-NEXT: vfadd.vv v16, v24, v16 +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v12, v16 +; ZVFH-NEXT: csrr a0, vlenb +; ZVFH-NEXT: slli a0, a0, 3 +; ZVFH-NEXT: add sp, sp, a0 +; ZVFH-NEXT: .cfi_def_cfa sp, 16 +; ZVFH-NEXT: addi sp, sp, 16 +; ZVFH-NEXT: .cfi_def_cfa_offset 0 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vf_nxv32bf16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: sub sp, sp, a0 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 
0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFHMIN-NEXT: fmv.x.h a0, fa0 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v8 +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v12 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m8, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v8, a0 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v0, v8 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v12 +; ZVFHMIN-NEXT: addi a0, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v0, v8, v0 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v0 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v16, v24, v16 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v12, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv32bf16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: addi sp, sp, -16 +; ZVFBFA-NEXT: .cfi_def_cfa_offset 16 +; ZVFBFA-NEXT: csrr a0, vlenb +; ZVFBFA-NEXT: slli a0, a0, 3 +; ZVFBFA-NEXT: sub sp, sp, a0 +; ZVFBFA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFBFA-NEXT: fmv.x.h a0, fa0 +; ZVFBFA-NEXT: vsetvli a1, zero, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFBFA-NEXT: addi a1, sp, 16 +; ZVFBFA-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill +; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFBFA-NEXT: vsetvli a1, zero, e16alt, m8, ta, ma +; ZVFBFA-NEXT: vmv.v.x v8, a0 +; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v0, v8 +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFBFA-NEXT: addi a0, sp, 16 +; ZVFBFA-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v0, v8, v0 +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v0 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v24, v16 +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v12, v16 +; ZVFBFA-NEXT: csrr a0, vlenb +; ZVFBFA-NEXT: slli a0, a0, 3 +; ZVFBFA-NEXT: add sp, sp, a0 +; ZVFBFA-NEXT: .cfi_def_cfa sp, 16 +; ZVFBFA-NEXT: addi sp, sp, 16 +; ZVFBFA-NEXT: .cfi_def_cfa_offset 0 +; ZVFBFA-NEXT: ret %head = insertelement <vscale x 32 x bfloat> poison, bfloat %b, i32 0 %splat = shufflevector <vscale x 32 x bfloat> %head, <vscale x 32 x bfloat> poison, <vscale x 32 x i32> zeroinitializer %vc = fadd <vscale x 32 x bfloat> %va, %splat @@ -285,6 +668,12 @@ define <vscale x 1 x half> @vfadd_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9 ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv1f16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; ZVFBFA-NEXT: vfadd.vv v8, v8, v9 +; ZVFBFA-NEXT: ret %vc = fadd <vscale x 1 x half> %va, %vb ret <vscale x 1 x half> %vc } @@ -306,6 +695,12 @@ define <vscale x 1 x half> @vfadd_vf_nxv1f16(<vscale 
x 1 x half> %va, half %b) { ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9 ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv1f16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; ZVFBFA-NEXT: vfadd.vf v8, v8, fa0 +; ZVFBFA-NEXT: ret %head = insertelement <vscale x 1 x half> poison, half %b, i32 0 %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer %vc = fadd <vscale x 1 x half> %va, %splat @@ -329,6 +724,12 @@ define <vscale x 2 x half> @vfadd_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9 ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv2f16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVFBFA-NEXT: vfadd.vv v8, v8, v9 +; ZVFBFA-NEXT: ret %vc = fadd <vscale x 2 x half> %va, %vb ret <vscale x 2 x half> %vc } @@ -350,6 +751,12 @@ define <vscale x 2 x half> @vfadd_vf_nxv2f16(<vscale x 2 x half> %va, half %b) { ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9 ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv2f16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVFBFA-NEXT: vfadd.vf v8, v8, fa0 +; ZVFBFA-NEXT: ret %head = insertelement <vscale x 2 x half> poison, half %b, i32 0 %splat = shufflevector <vscale x 2 x half> %head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer %vc = fadd <vscale x 2 x half> %va, %splat @@ -373,6 +780,12 @@ define <vscale x 4 x half> @vfadd_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10 ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv4f16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; ZVFBFA-NEXT: vfadd.vv v8, v8, v9 +; ZVFBFA-NEXT: ret %vc = fadd <vscale x 4 x half> %va, %vb ret <vscale x 4 x half> %vc } @@ -394,6 +807,12 @@ define <vscale x 4 x half> @vfadd_vf_nxv4f16(<vscale x 4 x half> %va, half %b) { ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10 ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv4f16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; ZVFBFA-NEXT: vfadd.vf v8, v8, fa0 +; ZVFBFA-NEXT: ret %head = insertelement <vscale x 4 x half> poison, half %b, i32 0 %splat = shufflevector <vscale x 4 x half> %head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer %vc = fadd <vscale x 4 x half> %va, %splat @@ -417,6 +836,12 @@ define <vscale x 8 x half> @vfadd_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12 ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv8f16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; ZVFBFA-NEXT: vfadd.vv v8, v8, v10 +; ZVFBFA-NEXT: ret %vc = fadd <vscale x 8 x half> %va, %vb ret <vscale x 8 x half> %vc } @@ -438,6 +863,12 @@ define <vscale x 8 x half> @vfadd_vf_nxv8f16(<vscale x 8 x half> %va, half %b) { ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12 ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv8f16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; ZVFBFA-NEXT: vfadd.vf v8, v8, fa0 +; ZVFBFA-NEXT: ret %head = insertelement <vscale x 8 x half> poison, half %b, i32 0 %splat = shufflevector <vscale x 8 x half> %head, <vscale x 
8 x half> poison, <vscale x 8 x i32> zeroinitializer %vc = fadd <vscale x 8 x half> %va, %splat @@ -461,6 +892,12 @@ define <vscale x 8 x half> @vfadd_fv_nxv8f16(<vscale x 8 x half> %va, half %b) { ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12 ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_fv_nxv8f16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; ZVFBFA-NEXT: vfadd.vf v8, v8, fa0 +; ZVFBFA-NEXT: ret %head = insertelement <vscale x 8 x half> poison, half %b, i32 0 %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer %vc = fadd <vscale x 8 x half> %splat, %va @@ -484,6 +921,12 @@ define <vscale x 16 x half> @vfadd_vv_nxv16f16(<vscale x 16 x half> %va, <vscale ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv16f16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFBFA-NEXT: vfadd.vv v8, v8, v12 +; ZVFBFA-NEXT: ret %vc = fadd <vscale x 16 x half> %va, %vb ret <vscale x 16 x half> %vc } @@ -505,6 +948,12 @@ define <vscale x 16 x half> @vfadd_vf_nxv16f16(<vscale x 16 x half> %va, half %b ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv16f16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFBFA-NEXT: vfadd.vf v8, v8, fa0 +; ZVFBFA-NEXT: ret %head = insertelement <vscale x 16 x half> poison, half %b, i32 0 %splat = shufflevector <vscale x 16 x half> %head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer %vc = fadd <vscale x 16 x half> %va, %splat @@ -549,6 +998,12 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscale ; ZVFHMIN-NEXT: addi sp, sp, 16 ; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv32f16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli a0, zero, e16, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v8, v8, v16 +; ZVFBFA-NEXT: ret %vc = fadd <vscale x 32 x half> %va, %vb ret <vscale x 32 x half> %vc } @@ -596,6 +1051,12 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16(<vscale x 32 x half> %va, half %b ; ZVFHMIN-NEXT: addi sp, sp, 16 ; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv32f16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli a0, zero, e16, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vf v8, v8, fa0 +; ZVFBFA-NEXT: ret %head = insertelement <vscale x 32 x half> poison, half %b, i32 0 %splat = shufflevector <vscale x 32 x half> %head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer %vc = fadd <vscale x 32 x half> %va, %splat diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll index 32e3d6b..633a201 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll @@ -11,52 +11,125 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,ZVFHMIN +; RUN: llc -mtriple=riscv64 -mattr=+zvfhmin,+experimental-zvfbfa,+v \ +; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ +; RUN: --check-prefixes=CHECK,ZVFBFA declare <vscale x 1 x bfloat> @llvm.vp.fadd.nxv1bf16(<vscale x 1 x bfloat>, <vscale x 1 x bfloat>, <vscale x 1 x i1>, i32) define <vscale x 1 x bfloat> @vfadd_vv_nxv1bf16(<vscale x 1 x 
bfloat> %va, <vscale x 1 x bfloat> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vv_nxv1bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v9, v0.t -; CHECK-NEXT: vfwcvtbf16.f.f.v v9, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma -; CHECK-NEXT: vfadd.vv v9, v9, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vv_nxv1bf16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v9, v0.t +; ZVFH-NEXT: vfwcvtbf16.f.f.v v9, v8, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e32, mf2, ta, ma +; ZVFH-NEXT: vfadd.vv v9, v9, v10, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vv_nxv1bf16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v9, v0.t +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v9, v8, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v9, v9, v10, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv1bf16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma +; ZVFBFA-NEXT: vfadd.vv v9, v9, v10, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9, v0.t +; ZVFBFA-NEXT: ret %v = call <vscale x 1 x bfloat> @llvm.vp.fadd.nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %b, <vscale x 1 x i1> %m, i32 %evl) ret <vscale x 1 x bfloat> %v } define <vscale x 1 x bfloat> @vfadd_vv_nxv1bf16_unmasked(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %b, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vv_nxv1bf16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v9 -; CHECK-NEXT: vfwcvtbf16.f.f.v v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma -; CHECK-NEXT: vfadd.vv v9, v9, v10 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vv_nxv1bf16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v9 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v9, v8 +; ZVFH-NEXT: vsetvli zero, zero, e32, mf2, ta, ma +; ZVFH-NEXT: vfadd.vv v9, v9, v10 +; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vv_nxv1bf16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v9 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v9, v8 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v9, v9, v10 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9 +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv1bf16_unmasked: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9 +; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma +; ZVFBFA-NEXT: vfadd.vv v9, v9, v10 +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma +; 
ZVFBFA-NEXT: vfncvt.f.f.w v8, v9 +; ZVFBFA-NEXT: ret %v = call <vscale x 1 x bfloat> @llvm.vp.fadd.nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl) ret <vscale x 1 x bfloat> %v } define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16(<vscale x 1 x bfloat> %va, bfloat %b, <vscale x 1 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vf_nxv1bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: fmv.x.h a1, fa0 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vmv.v.x v9, a1 -; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t -; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma -; CHECK-NEXT: vfadd.vv v9, v10, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vf_nxv1bf16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: fmv.x.h a1, fa0 +; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; ZVFH-NEXT: vmv.v.x v9, a1 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t +; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v9, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e32, mf2, ta, ma +; ZVFH-NEXT: vfadd.vv v9, v10, v8, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vf_nxv1bf16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v9, a1 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v9, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v9, v10, v8, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv1bf16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: fmv.x.h a1, fa0 +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; ZVFBFA-NEXT: vmv.v.x v9, a1 +; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma +; ZVFBFA-NEXT: vfadd.vv v9, v10, v8, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9, v0.t +; ZVFBFA-NEXT: ret %elt.head = insertelement <vscale x 1 x bfloat> poison, bfloat %b, i32 0 %vb = shufflevector <vscale x 1 x bfloat> %elt.head, <vscale x 1 x bfloat> poison, <vscale x 1 x i32> zeroinitializer %v = call <vscale x 1 x bfloat> @llvm.vp.fadd.nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %vb, <vscale x 1 x i1> %m, i32 %evl) @@ -64,18 +137,44 @@ define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16(<vscale x 1 x bfloat> %va, bfloa } define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16_commute(<vscale x 1 x bfloat> %va, bfloat %b, <vscale x 1 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vf_nxv1bf16_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: fmv.x.h a1, fa0 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vmv.v.x v9, a1 -; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t -; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma -; CHECK-NEXT: vfadd.vv v9, v8, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vf_nxv1bf16_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: fmv.x.h a1, fa0 +; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; ZVFH-NEXT: vmv.v.x v9, a1 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t +; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v9, 
v0.t +; ZVFH-NEXT: vsetvli zero, zero, e32, mf2, ta, ma +; ZVFH-NEXT: vfadd.vv v9, v8, v10, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vf_nxv1bf16_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v9, a1 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v9, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v9, v8, v10, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv1bf16_commute: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: fmv.x.h a1, fa0 +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; ZVFBFA-NEXT: vmv.v.x v9, a1 +; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma +; ZVFBFA-NEXT: vfadd.vv v9, v8, v10, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9, v0.t +; ZVFBFA-NEXT: ret %elt.head = insertelement <vscale x 1 x bfloat> poison, bfloat %b, i32 0 %vb = shufflevector <vscale x 1 x bfloat> %elt.head, <vscale x 1 x bfloat> poison, <vscale x 1 x i32> zeroinitializer %v = call <vscale x 1 x bfloat> @llvm.vp.fadd.nxv1bf16(<vscale x 1 x bfloat> %vb, <vscale x 1 x bfloat> %va, <vscale x 1 x i1> %m, i32 %evl) @@ -83,18 +182,44 @@ define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16_commute(<vscale x 1 x bfloat> %v } define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16_unmasked(<vscale x 1 x bfloat> %va, bfloat %b, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vf_nxv1bf16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: fmv.x.h a1, fa0 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vmv.v.x v9, a1 -; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8 -; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma -; CHECK-NEXT: vfadd.vv v9, v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vf_nxv1bf16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: fmv.x.h a1, fa0 +; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; ZVFH-NEXT: vmv.v.x v9, a1 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v8 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v9 +; ZVFH-NEXT: vsetvli zero, zero, e32, mf2, ta, ma +; ZVFH-NEXT: vfadd.vv v9, v10, v8 +; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vf_nxv1bf16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v9, a1 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v8 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v9 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v9, v10, v8 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9 +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv1bf16_unmasked: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: fmv.x.h a1, fa0 +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; ZVFBFA-NEXT: vmv.v.x v9, a1 +; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8 +; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma +; ZVFBFA-NEXT: vfadd.vv v9, v10, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9 
+; ZVFBFA-NEXT: ret %elt.head = insertelement <vscale x 1 x bfloat> poison, bfloat %b, i32 0 %vb = shufflevector <vscale x 1 x bfloat> %elt.head, <vscale x 1 x bfloat> poison, <vscale x 1 x i32> zeroinitializer %v = call <vscale x 1 x bfloat> @llvm.vp.fadd.nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl) @@ -102,18 +227,44 @@ define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16_unmasked(<vscale x 1 x bfloat> % } define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16_unmasked_commute(<vscale x 1 x bfloat> %va, bfloat %b, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vf_nxv1bf16_unmasked_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: fmv.x.h a1, fa0 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vmv.v.x v9, a1 -; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8 -; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma -; CHECK-NEXT: vfadd.vv v9, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vf_nxv1bf16_unmasked_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: fmv.x.h a1, fa0 +; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; ZVFH-NEXT: vmv.v.x v9, a1 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v8 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v9 +; ZVFH-NEXT: vsetvli zero, zero, e32, mf2, ta, ma +; ZVFH-NEXT: vfadd.vv v9, v8, v10 +; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vf_nxv1bf16_unmasked_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v9, a1 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v8 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v9 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v9, v8, v10 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9 +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv1bf16_unmasked_commute: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: fmv.x.h a1, fa0 +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; ZVFBFA-NEXT: vmv.v.x v9, a1 +; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8 +; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma +; ZVFBFA-NEXT: vfadd.vv v9, v8, v10 +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9 +; ZVFBFA-NEXT: ret %elt.head = insertelement <vscale x 1 x bfloat> poison, bfloat %b, i32 0 %vb = shufflevector <vscale x 1 x bfloat> %elt.head, <vscale x 1 x bfloat> poison, <vscale x 1 x i32> zeroinitializer %v = call <vscale x 1 x bfloat> @llvm.vp.fadd.nxv1bf16(<vscale x 1 x bfloat> %vb, <vscale x 1 x bfloat> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl) @@ -123,48 +274,118 @@ define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16_unmasked_commute(<vscale x 1 x b declare <vscale x 2 x bfloat> @llvm.vp.fadd.nxv2bf16(<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x i1>, i32) define <vscale x 2 x bfloat> @vfadd_vv_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vv_nxv2bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v9, v0.t -; CHECK-NEXT: vfwcvtbf16.f.f.v v9, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; CHECK-NEXT: vfadd.vv v9, v9, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t 
-; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vv_nxv2bf16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v9, v0.t +; ZVFH-NEXT: vfwcvtbf16.f.f.v v9, v8, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; ZVFH-NEXT: vfadd.vv v9, v9, v10, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vv_nxv2bf16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v9, v0.t +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v9, v8, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v9, v9, v10, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv2bf16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; ZVFBFA-NEXT: vfadd.vv v9, v9, v10, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf2, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9, v0.t +; ZVFBFA-NEXT: ret %v = call <vscale x 2 x bfloat> @llvm.vp.fadd.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %b, <vscale x 2 x i1> %m, i32 %evl) ret <vscale x 2 x bfloat> %v } define <vscale x 2 x bfloat> @vfadd_vv_nxv2bf16_unmasked(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %b, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vv_nxv2bf16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v9 -; CHECK-NEXT: vfwcvtbf16.f.f.v v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; CHECK-NEXT: vfadd.vv v9, v9, v10 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vv_nxv2bf16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v9 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v9, v8 +; ZVFH-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; ZVFH-NEXT: vfadd.vv v9, v9, v10 +; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vv_nxv2bf16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v9 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v9, v8 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v9, v9, v10 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9 +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv2bf16_unmasked: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9 +; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; ZVFBFA-NEXT: vfadd.vv v9, v9, v10 +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf2, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9 +; ZVFBFA-NEXT: ret %v = call <vscale x 2 x bfloat> @llvm.vp.fadd.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl) ret <vscale x 2 x bfloat> %v } define <vscale x 2 x bfloat> @vfadd_vf_nxv2bf16(<vscale x 2 x bfloat> %va, bfloat %b, <vscale x 2 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vf_nxv2bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: fmv.x.h a1, fa0 -; CHECK-NEXT: vsetvli zero, 
a0, e16, mf2, ta, ma -; CHECK-NEXT: vmv.v.x v9, a1 -; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t -; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; CHECK-NEXT: vfadd.vv v9, v10, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vf_nxv2bf16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: fmv.x.h a1, fa0 +; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vmv.v.x v9, a1 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t +; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v9, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; ZVFH-NEXT: vfadd.vv v9, v10, v8, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vf_nxv2bf16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v9, a1 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v9, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v9, v10, v8, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv2bf16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: fmv.x.h a1, fa0 +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; ZVFBFA-NEXT: vmv.v.x v9, a1 +; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; ZVFBFA-NEXT: vfadd.vv v9, v10, v8, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf2, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9, v0.t +; ZVFBFA-NEXT: ret %elt.head = insertelement <vscale x 2 x bfloat> poison, bfloat %b, i32 0 %vb = shufflevector <vscale x 2 x bfloat> %elt.head, <vscale x 2 x bfloat> poison, <vscale x 2 x i32> zeroinitializer %v = call <vscale x 2 x bfloat> @llvm.vp.fadd.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, <vscale x 2 x i1> %m, i32 %evl) @@ -172,18 +393,44 @@ define <vscale x 2 x bfloat> @vfadd_vf_nxv2bf16(<vscale x 2 x bfloat> %va, bfloa } define <vscale x 2 x bfloat> @vfadd_vf_nxv2bf16_unmasked(<vscale x 2 x bfloat> %va, bfloat %b, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vf_nxv2bf16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: fmv.x.h a1, fa0 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vmv.v.x v9, a1 -; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8 -; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; CHECK-NEXT: vfadd.vv v9, v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vf_nxv2bf16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: fmv.x.h a1, fa0 +; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vmv.v.x v9, a1 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v8 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v9 +; ZVFH-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; ZVFH-NEXT: vfadd.vv v9, v10, v8 +; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vf_nxv2bf16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v9, a1 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v8 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v9 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; ZVFHMIN-NEXT: 
vfadd.vv v9, v10, v8 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9 +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv2bf16_unmasked: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: fmv.x.h a1, fa0 +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; ZVFBFA-NEXT: vmv.v.x v9, a1 +; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8 +; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; ZVFBFA-NEXT: vfadd.vv v9, v10, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf2, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9 +; ZVFBFA-NEXT: ret %elt.head = insertelement <vscale x 2 x bfloat> poison, bfloat %b, i32 0 %vb = shufflevector <vscale x 2 x bfloat> %elt.head, <vscale x 2 x bfloat> poison, <vscale x 2 x i32> zeroinitializer %v = call <vscale x 2 x bfloat> @llvm.vp.fadd.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl) @@ -193,48 +440,118 @@ define <vscale x 2 x bfloat> @vfadd_vf_nxv2bf16_unmasked(<vscale x 2 x bfloat> % declare <vscale x 4 x bfloat> @llvm.vp.fadd.nxv4bf16(<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x i1>, i32) define <vscale x 4 x bfloat> @vfadd_vv_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vv_nxv4bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v9, v0.t -; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma -; CHECK-NEXT: vfadd.vv v10, v12, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vv_nxv4bf16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v9, v0.t +; ZVFH-NEXT: vfwcvtbf16.f.f.v v12, v8, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e32, m2, ta, ma +; ZVFH-NEXT: vfadd.vv v10, v12, v10, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v10, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vv_nxv4bf16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v9, v0.t +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v12, v8, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v10, v12, v10, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v10, v0.t +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv4bf16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m2, ta, ma +; ZVFBFA-NEXT: vfadd.vv v10, v12, v10, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m1, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v10, v0.t +; ZVFBFA-NEXT: ret %v = call <vscale x 4 x bfloat> @llvm.vp.fadd.nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %b, <vscale x 4 x i1> %m, i32 %evl) ret <vscale x 4 x bfloat> %v } define <vscale x 4 x bfloat> @vfadd_vv_nxv4bf16_unmasked(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %b, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vv_nxv4bf16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v9 -; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma -; CHECK-NEXT: vfadd.vv v10, v12, v10 -; CHECK-NEXT: 
vsetvli zero, zero, e16, m1, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vv_nxv4bf16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v9 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v12, v8 +; ZVFH-NEXT: vsetvli zero, zero, e32, m2, ta, ma +; ZVFH-NEXT: vfadd.vv v10, v12, v10 +; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v10 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vv_nxv4bf16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v9 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v12, v8 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v10, v12, v10 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v10 +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv4bf16_unmasked: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9 +; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m2, ta, ma +; ZVFBFA-NEXT: vfadd.vv v10, v12, v10 +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m1, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v10 +; ZVFBFA-NEXT: ret %v = call <vscale x 4 x bfloat> @llvm.vp.fadd.nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl) ret <vscale x 4 x bfloat> %v } define <vscale x 4 x bfloat> @vfadd_vf_nxv4bf16(<vscale x 4 x bfloat> %va, bfloat %b, <vscale x 4 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vf_nxv4bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: fmv.x.h a1, fa0 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vmv.v.x v12, a1 -; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t -; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma -; CHECK-NEXT: vfadd.vv v10, v10, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vf_nxv4bf16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: fmv.x.h a1, fa0 +; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFH-NEXT: vmv.v.x v12, a1 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t +; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v12, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e32, m2, ta, ma +; ZVFH-NEXT: vfadd.vv v10, v10, v8, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v10, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vf_nxv4bf16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v12, a1 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v12, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v10, v10, v8, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v10, v0.t +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv4bf16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: fmv.x.h a1, fa0 +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; ZVFBFA-NEXT: vmv.v.x v12, a1 +; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v12, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m2, ta, ma +; ZVFBFA-NEXT: vfadd.vv v10, v10, v8, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m1, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v10, v0.t +; ZVFBFA-NEXT: ret %elt.head = insertelement <vscale x 4 x bfloat> poison, bfloat 
%b, i32 0 %vb = shufflevector <vscale x 4 x bfloat> %elt.head, <vscale x 4 x bfloat> poison, <vscale x 4 x i32> zeroinitializer %v = call <vscale x 4 x bfloat> @llvm.vp.fadd.nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %vb, <vscale x 4 x i1> %m, i32 %evl) @@ -242,18 +559,44 @@ define <vscale x 4 x bfloat> @vfadd_vf_nxv4bf16(<vscale x 4 x bfloat> %va, bfloa } define <vscale x 4 x bfloat> @vfadd_vf_nxv4bf16_unmasked(<vscale x 4 x bfloat> %va, bfloat %b, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vf_nxv4bf16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: fmv.x.h a1, fa0 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vmv.v.x v12, a1 -; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8 -; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma -; CHECK-NEXT: vfadd.vv v10, v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vf_nxv4bf16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: fmv.x.h a1, fa0 +; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFH-NEXT: vmv.v.x v12, a1 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v8 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v12 +; ZVFH-NEXT: vsetvli zero, zero, e32, m2, ta, ma +; ZVFH-NEXT: vfadd.vv v10, v10, v8 +; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v10 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vf_nxv4bf16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v12, a1 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v8 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v12 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v10, v10, v8 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v10 +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv4bf16_unmasked: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: fmv.x.h a1, fa0 +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; ZVFBFA-NEXT: vmv.v.x v12, a1 +; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8 +; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v12 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m2, ta, ma +; ZVFBFA-NEXT: vfadd.vv v10, v10, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m1, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v10 +; ZVFBFA-NEXT: ret %elt.head = insertelement <vscale x 4 x bfloat> poison, bfloat %b, i32 0 %vb = shufflevector <vscale x 4 x bfloat> %elt.head, <vscale x 4 x bfloat> poison, <vscale x 4 x i32> zeroinitializer %v = call <vscale x 4 x bfloat> @llvm.vp.fadd.nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl) @@ -263,48 +606,118 @@ define <vscale x 4 x bfloat> @vfadd_vf_nxv4bf16_unmasked(<vscale x 4 x bfloat> % declare <vscale x 8 x bfloat> @llvm.vp.fadd.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x i1>, i32) define <vscale x 8 x bfloat> @vfadd_vv_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vv_nxv8bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v10, v0.t -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma -; CHECK-NEXT: vfadd.vv v12, v16, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v12, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vv_nxv8bf16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; 
ZVFH-NEXT: vfwcvtbf16.f.f.v v12, v10, v0.t +; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v8, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFH-NEXT: vfadd.vv v12, v16, v12, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v12, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vv_nxv8bf16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v12, v10, v0.t +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v8, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v12, v16, v12, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v12, v0.t +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv8bf16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v10, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFBFA-NEXT: vfadd.vv v12, v16, v12, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m2, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12, v0.t +; ZVFBFA-NEXT: ret %v = call <vscale x 8 x bfloat> @llvm.vp.fadd.nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %b, <vscale x 8 x i1> %m, i32 %evl) ret <vscale x 8 x bfloat> %v } define <vscale x 8 x bfloat> @vfadd_vv_nxv8bf16_unmasked(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %b, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vv_nxv8bf16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v10 -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma -; CHECK-NEXT: vfadd.vv v12, v16, v12 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v12 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vv_nxv8bf16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v12, v10 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v8 +; ZVFH-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFH-NEXT: vfadd.vv v12, v16, v12 +; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v12 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vv_nxv8bf16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v12, v10 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v8 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v12, v16, v12 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v12 +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv8bf16_unmasked: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v10 +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFBFA-NEXT: vfadd.vv v12, v16, v12 +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m2, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12 +; ZVFBFA-NEXT: ret %v = call <vscale x 8 x bfloat> @llvm.vp.fadd.nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl) ret <vscale x 8 x bfloat> %v } define <vscale x 8 x bfloat> @vfadd_vf_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat %b, <vscale x 8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vf_nxv8bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: fmv.x.h a1, fa0 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vmv.v.x v16, a1 -; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8, v0.t -; 
CHECK-NEXT: vfwcvtbf16.f.f.v v8, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma -; CHECK-NEXT: vfadd.vv v12, v12, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v12, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vf_nxv8bf16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: fmv.x.h a1, fa0 +; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFH-NEXT: vmv.v.x v16, a1 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v12, v8, v0.t +; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v16, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFH-NEXT: vfadd.vv v12, v12, v8, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v12, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vf_nxv8bf16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v16, a1 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v12, v8, v0.t +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v16, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v12, v12, v8, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v12, v0.t +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv8bf16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: fmv.x.h a1, fa0 +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; ZVFBFA-NEXT: vmv.v.x v16, a1 +; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v16, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFBFA-NEXT: vfadd.vv v12, v12, v8, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m2, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12, v0.t +; ZVFBFA-NEXT: ret %elt.head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0 %vb = shufflevector <vscale x 8 x bfloat> %elt.head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer %v = call <vscale x 8 x bfloat> @llvm.vp.fadd.nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb, <vscale x 8 x i1> %m, i32 %evl) @@ -312,18 +725,44 @@ define <vscale x 8 x bfloat> @vfadd_vf_nxv8bf16(<vscale x 8 x bfloat> %va, bfloa } define <vscale x 8 x bfloat> @vfadd_vf_nxv8bf16_unmasked(<vscale x 8 x bfloat> %va, bfloat %b, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vf_nxv8bf16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: fmv.x.h a1, fa0 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vmv.v.x v16, a1 -; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8 -; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v16 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma -; CHECK-NEXT: vfadd.vv v12, v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v12 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vf_nxv8bf16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: fmv.x.h a1, fa0 +; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFH-NEXT: vmv.v.x v16, a1 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v12, v8 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v16 +; ZVFH-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFH-NEXT: vfadd.vv v12, v12, v8 +; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v12 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vf_nxv8bf16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v16, a1 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v12, v8 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v16 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v12, v12, v8 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: 
vfncvtbf16.f.f.w v8, v12 +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv8bf16_unmasked: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: fmv.x.h a1, fa0 +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; ZVFBFA-NEXT: vmv.v.x v16, a1 +; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8 +; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v16 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFBFA-NEXT: vfadd.vv v12, v12, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m2, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12 +; ZVFBFA-NEXT: ret %elt.head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0 %vb = shufflevector <vscale x 8 x bfloat> %elt.head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer %v = call <vscale x 8 x bfloat> @llvm.vp.fadd.nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl) @@ -333,48 +772,118 @@ define <vscale x 8 x bfloat> @vfadd_vf_nxv8bf16_unmasked(<vscale x 8 x bfloat> % declare <vscale x 16 x bfloat> @llvm.vp.fadd.nxv16bf16(<vscale x 16 x bfloat>, <vscale x 16 x bfloat>, <vscale x 16 x i1>, i32) define <vscale x 16 x bfloat> @vfadd_vv_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vv_nxv16bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t -; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfadd.vv v16, v24, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vv_nxv16bf16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t +; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v8, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFH-NEXT: vfadd.vv v16, v24, v16, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vv_nxv16bf16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v8, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v16, v24, v16, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv16bf16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v8, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v24, v16, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16, v0.t +; ZVFBFA-NEXT: ret %v = call <vscale x 16 x bfloat> @llvm.vp.fadd.nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %b, <vscale x 16 x i1> %m, i32 %evl) ret <vscale x 16 x bfloat> %v } define <vscale x 16 x bfloat> @vfadd_vv_nxv16bf16_unmasked(<vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %b, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vv_nxv16bf16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12 -; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfadd.vv v16, v24, v16 -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: 
vfncvtbf16.f.f.w v8, v16 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vv_nxv16bf16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v12 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v8 +; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFH-NEXT: vfadd.vv v16, v24, v16 +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vv_nxv16bf16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v12 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v8 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v16, v24, v16 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16 +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv16bf16_unmasked: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v24, v16 +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16 +; ZVFBFA-NEXT: ret %v = call <vscale x 16 x bfloat> @llvm.vp.fadd.nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl) ret <vscale x 16 x bfloat> %v } define <vscale x 16 x bfloat> @vfadd_vf_nxv16bf16(<vscale x 16 x bfloat> %va, bfloat %b, <vscale x 16 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vf_nxv16bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: fmv.x.h a1, fa0 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vmv.v.x v24, a1 -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8, v0.t -; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfadd.vv v16, v16, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vf_nxv16bf16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: fmv.x.h a1, fa0 +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vmv.v.x v24, a1 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v8, v0.t +; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v24, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFH-NEXT: vfadd.vv v16, v16, v8, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vf_nxv16bf16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v24, a1 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v8, v0.t +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v24, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v16, v16, v8, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv16bf16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: fmv.x.h a1, fa0 +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vmv.v.x v24, a1 +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v24, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v16, v8, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16, v0.t +; ZVFBFA-NEXT: ret %elt.head = insertelement <vscale x 16 x bfloat> poison, bfloat %b, i32 0 %vb = shufflevector 
<vscale x 16 x bfloat> %elt.head, <vscale x 16 x bfloat> poison, <vscale x 16 x i32> zeroinitializer %v = call <vscale x 16 x bfloat> @llvm.vp.fadd.nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %vb, <vscale x 16 x i1> %m, i32 %evl) @@ -382,18 +891,44 @@ define <vscale x 16 x bfloat> @vfadd_vf_nxv16bf16(<vscale x 16 x bfloat> %va, bf } define <vscale x 16 x bfloat> @vfadd_vf_nxv16bf16_unmasked(<vscale x 16 x bfloat> %va, bfloat %b, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vf_nxv16bf16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: fmv.x.h a1, fa0 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vmv.v.x v24, a1 -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8 -; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v24 -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfadd.vv v16, v16, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vf_nxv16bf16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: fmv.x.h a1, fa0 +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vmv.v.x v24, a1 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v8 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v24 +; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFH-NEXT: vfadd.vv v16, v16, v8 +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vf_nxv16bf16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v24, a1 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v8 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v24 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v16, v16, v8 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16 +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv16bf16_unmasked: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: fmv.x.h a1, fa0 +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vmv.v.x v24, a1 +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v24 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v16, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16 +; ZVFBFA-NEXT: ret %elt.head = insertelement <vscale x 16 x bfloat> poison, bfloat %b, i32 0 %vb = shufflevector <vscale x 16 x bfloat> %elt.head, <vscale x 16 x bfloat> poison, <vscale x 16 x i32> zeroinitializer %v = call <vscale x 16 x bfloat> @llvm.vp.fadd.nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl) @@ -403,173 +938,493 @@ define <vscale x 16 x bfloat> @vfadd_vf_nxv16bf16_unmasked(<vscale x 16 x bfloat declare <vscale x 32 x bfloat> @llvm.vp.fadd.nxv32bf16(<vscale x 32 x bfloat>, <vscale x 32 x bfloat>, <vscale x 32 x i1>, i32) define <vscale x 32 x bfloat> @vfadd_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vv_nxv32bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 3 -; CHECK-NEXT: sub sp, sp, a1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma -; CHECK-NEXT: vmv1r.v v7, v0 -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: slli a1, a2, 1 -; CHECK-NEXT: srli 
a2, a2, 2 -; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: vslidedown.vx v0, v0, a2 -; CHECK-NEXT: sltu a2, a0, a3 -; CHECK-NEXT: addi a2, a2, -1 -; CHECK-NEXT: and a2, a2, a3 -; CHECK-NEXT: addi a3, sp, 16 -; CHECK-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v20, v0.t -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfadd.vv v16, v16, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB22_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB22_2: -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v24, v0.t -; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfadd.vv v16, v24, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: add sp, sp, a0 -; CHECK-NEXT: .cfi_def_cfa sp, 16 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: .cfi_def_cfa_offset 0 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vv_nxv32bf16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: addi sp, sp, -16 +; ZVFH-NEXT: .cfi_def_cfa_offset 16 +; ZVFH-NEXT: csrr a1, vlenb +; ZVFH-NEXT: slli a1, a1, 3 +; ZVFH-NEXT: sub sp, sp, a1 +; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFH-NEXT: vsetvli a1, zero, e8, mf2, ta, ma +; ZVFH-NEXT: vmv1r.v v7, v0 +; ZVFH-NEXT: csrr a2, vlenb +; ZVFH-NEXT: slli a1, a2, 1 +; ZVFH-NEXT: srli a2, a2, 2 +; ZVFH-NEXT: sub a3, a0, a1 +; ZVFH-NEXT: vslidedown.vx v0, v0, a2 +; ZVFH-NEXT: sltu a2, a0, a3 +; ZVFH-NEXT: addi a2, a2, -1 +; ZVFH-NEXT: and a2, a2, a3 +; ZVFH-NEXT: addi a3, sp, 16 +; ZVFH-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill +; ZVFH-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v20, v0.t +; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFH-NEXT: vfadd.vv v16, v16, v24, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t +; ZVFH-NEXT: bltu a0, a1, .LBB22_2 +; ZVFH-NEXT: # %bb.1: +; ZVFH-NEXT: mv a0, a1 +; ZVFH-NEXT: .LBB22_2: +; ZVFH-NEXT: vmv1r.v v0, v7 +; ZVFH-NEXT: addi a1, sp, 16 +; ZVFH-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v24, v0.t +; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v8, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFH-NEXT: vfadd.vv v16, v24, v16, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t +; ZVFH-NEXT: csrr a0, vlenb +; ZVFH-NEXT: slli a0, a0, 3 +; ZVFH-NEXT: add sp, sp, a0 +; ZVFH-NEXT: .cfi_def_cfa sp, 16 +; ZVFH-NEXT: addi sp, sp, 16 +; ZVFH-NEXT: .cfi_def_cfa_offset 0 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vv_nxv32bf16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 
0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vmv1r.v v7, v0 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a1, a2, 1 +; ZVFHMIN-NEXT: srli a2, a2, 2 +; ZVFHMIN-NEXT: sub a3, a0, a1 +; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2 +; ZVFHMIN-NEXT: sltu a2, a0, a3 +; ZVFHMIN-NEXT: addi a2, a2, -1 +; ZVFHMIN-NEXT: and a2, a2, a3 +; ZVFHMIN-NEXT: addi a3, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v20, v0.t +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v16, v16, v24, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t +; ZVFHMIN-NEXT: bltu a0, a1, .LBB22_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a0, a1 +; ZVFHMIN-NEXT: .LBB22_2: +; ZVFHMIN-NEXT: vmv1r.v v0, v7 +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v24, v0.t +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v8, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v16, v24, v16, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv32bf16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: addi sp, sp, -16 +; ZVFBFA-NEXT: .cfi_def_cfa_offset 16 +; ZVFBFA-NEXT: csrr a1, vlenb +; ZVFBFA-NEXT: slli a1, a1, 3 +; ZVFBFA-NEXT: sub sp, sp, a1 +; ZVFBFA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFBFA-NEXT: vsetvli a1, zero, e8, mf2, ta, ma +; ZVFBFA-NEXT: vmv1r.v v7, v0 +; ZVFBFA-NEXT: csrr a2, vlenb +; ZVFBFA-NEXT: slli a1, a2, 1 +; ZVFBFA-NEXT: srli a2, a2, 2 +; ZVFBFA-NEXT: sub a3, a0, a1 +; ZVFBFA-NEXT: vslidedown.vx v0, v0, a2 +; ZVFBFA-NEXT: sltu a2, a0, a3 +; ZVFBFA-NEXT: addi a2, a2, -1 +; ZVFBFA-NEXT: and a2, a2, a3 +; ZVFBFA-NEXT: addi a3, sp, 16 +; ZVFBFA-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill +; ZVFBFA-NEXT: vsetvli zero, a2, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v20, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v16, v24, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v12, v16, v0.t +; ZVFBFA-NEXT: bltu a0, a1, .LBB22_2 +; ZVFBFA-NEXT: # %bb.1: +; ZVFBFA-NEXT: mv a0, a1 +; ZVFBFA-NEXT: .LBB22_2: +; ZVFBFA-NEXT: vmv1r.v v0, v7 +; ZVFBFA-NEXT: addi a1, sp, 16 +; ZVFBFA-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v24, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v8, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v24, v16, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16, v0.t +; ZVFBFA-NEXT: csrr a0, vlenb +; ZVFBFA-NEXT: slli a0, a0, 3 +; ZVFBFA-NEXT: add sp, sp, a0 +; ZVFBFA-NEXT: .cfi_def_cfa sp, 16 +; ZVFBFA-NEXT: addi sp, 
sp, 16 +; ZVFBFA-NEXT: .cfi_def_cfa_offset 0 +; ZVFBFA-NEXT: ret %v = call <vscale x 32 x bfloat> @llvm.vp.fadd.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %b, <vscale x 32 x i1> %m, i32 %evl) ret <vscale x 32 x bfloat> %v } define <vscale x 32 x bfloat> @vfadd_vv_nxv32bf16_unmasked(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %b, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vv_nxv32bf16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 3 -; CHECK-NEXT: sub sp, sp, a1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma -; CHECK-NEXT: vmset.m v24 -; CHECK-NEXT: slli a1, a2, 1 -; CHECK-NEXT: srli a2, a2, 2 -; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma -; CHECK-NEXT: vslidedown.vx v0, v24, a2 -; CHECK-NEXT: sltu a2, a0, a3 -; CHECK-NEXT: addi a2, a2, -1 -; CHECK-NEXT: and a2, a2, a3 -; CHECK-NEXT: addi a3, sp, 16 -; CHECK-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v20, v0.t -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfadd.vv v16, v16, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB23_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB23_2: -; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v24 -; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfadd.vv v16, v24, v16 -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16 -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: add sp, sp, a0 -; CHECK-NEXT: .cfi_def_cfa sp, 16 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: .cfi_def_cfa_offset 0 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vv_nxv32bf16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: addi sp, sp, -16 +; ZVFH-NEXT: .cfi_def_cfa_offset 16 +; ZVFH-NEXT: csrr a1, vlenb +; ZVFH-NEXT: slli a1, a1, 3 +; ZVFH-NEXT: sub sp, sp, a1 +; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFH-NEXT: csrr a2, vlenb +; ZVFH-NEXT: vsetvli a1, zero, e8, m4, ta, ma +; ZVFH-NEXT: vmset.m v24 +; ZVFH-NEXT: slli a1, a2, 1 +; ZVFH-NEXT: srli a2, a2, 2 +; ZVFH-NEXT: sub a3, a0, a1 +; ZVFH-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFH-NEXT: vslidedown.vx v0, v24, a2 +; ZVFH-NEXT: sltu a2, a0, a3 +; ZVFH-NEXT: addi a2, a2, -1 +; ZVFH-NEXT: and a2, a2, a3 +; ZVFH-NEXT: addi a3, sp, 16 +; ZVFH-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill +; ZVFH-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v20, v0.t +; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFH-NEXT: vfadd.vv v16, v16, v24, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t +; ZVFH-NEXT: bltu a0, a1, .LBB23_2 +; ZVFH-NEXT: # %bb.1: +; ZVFH-NEXT: mv a0, a1 +; ZVFH-NEXT: .LBB23_2: +; ZVFH-NEXT: addi a1, sp, 16 
+; ZVFH-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v24 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v8 +; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFH-NEXT: vfadd.vv v16, v24, v16 +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16 +; ZVFH-NEXT: csrr a0, vlenb +; ZVFH-NEXT: slli a0, a0, 3 +; ZVFH-NEXT: add sp, sp, a0 +; ZVFH-NEXT: .cfi_def_cfa sp, 16 +; ZVFH-NEXT: addi sp, sp, 16 +; ZVFH-NEXT: .cfi_def_cfa_offset 0 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vv_nxv32bf16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma +; ZVFHMIN-NEXT: vmset.m v24 +; ZVFHMIN-NEXT: slli a1, a2, 1 +; ZVFHMIN-NEXT: srli a2, a2, 2 +; ZVFHMIN-NEXT: sub a3, a0, a1 +; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2 +; ZVFHMIN-NEXT: sltu a2, a0, a3 +; ZVFHMIN-NEXT: addi a2, a2, -1 +; ZVFHMIN-NEXT: and a2, a2, a3 +; ZVFHMIN-NEXT: addi a3, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v20, v0.t +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v16, v16, v24, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t +; ZVFHMIN-NEXT: bltu a0, a1, .LBB23_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a0, a1 +; ZVFHMIN-NEXT: .LBB23_2: +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v24 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v8 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v16, v24, v16 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv32bf16_unmasked: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: addi sp, sp, -16 +; ZVFBFA-NEXT: .cfi_def_cfa_offset 16 +; ZVFBFA-NEXT: csrr a1, vlenb +; ZVFBFA-NEXT: slli a1, a1, 3 +; ZVFBFA-NEXT: sub sp, sp, a1 +; ZVFBFA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFBFA-NEXT: csrr a2, vlenb +; ZVFBFA-NEXT: vsetvli a1, zero, e8, m4, ta, ma +; ZVFBFA-NEXT: vmset.m v24 +; ZVFBFA-NEXT: slli a1, a2, 1 +; ZVFBFA-NEXT: srli a2, a2, 2 +; ZVFBFA-NEXT: sub a3, a0, a1 +; ZVFBFA-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFBFA-NEXT: vslidedown.vx v0, v24, a2 +; ZVFBFA-NEXT: sltu a2, a0, a3 +; ZVFBFA-NEXT: addi a2, a2, -1 +; ZVFBFA-NEXT: and a2, a2, a3 +; ZVFBFA-NEXT: addi a3, sp, 16 +; ZVFBFA-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill +; ZVFBFA-NEXT: vsetvli zero, a2, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v20, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v 
v16, v12, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v16, v24, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v12, v16, v0.t +; ZVFBFA-NEXT: bltu a0, a1, .LBB23_2 +; ZVFBFA-NEXT: # %bb.1: +; ZVFBFA-NEXT: mv a0, a1 +; ZVFBFA-NEXT: .LBB23_2: +; ZVFBFA-NEXT: addi a1, sp, 16 +; ZVFBFA-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v24 +; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v24, v16 +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16 +; ZVFBFA-NEXT: csrr a0, vlenb +; ZVFBFA-NEXT: slli a0, a0, 3 +; ZVFBFA-NEXT: add sp, sp, a0 +; ZVFBFA-NEXT: .cfi_def_cfa sp, 16 +; ZVFBFA-NEXT: addi sp, sp, 16 +; ZVFBFA-NEXT: .cfi_def_cfa_offset 0 +; ZVFBFA-NEXT: ret %v = call <vscale x 32 x bfloat> @llvm.vp.fadd.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl) ret <vscale x 32 x bfloat> %v } define <vscale x 32 x bfloat> @vfadd_vf_nxv32bf16(<vscale x 32 x bfloat> %va, bfloat %b, <vscale x 32 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vf_nxv32bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 4 -; CHECK-NEXT: sub sp, sp, a1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma -; CHECK-NEXT: vmv1r.v v7, v0 -; CHECK-NEXT: fmv.x.h a1, fa0 -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: vmv.v.x v24, a1 -; CHECK-NEXT: slli a1, a2, 1 -; CHECK-NEXT: srli a2, a2, 2 -; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma -; CHECK-NEXT: vslidedown.vx v0, v0, a2 -; CHECK-NEXT: sltu a2, a0, a3 -; CHECK-NEXT: addi a2, a2, -1 -; CHECK-NEXT: and a2, a2, a3 -; CHECK-NEXT: csrr a3, vlenb -; CHECK-NEXT: slli a3, a3, 3 -; CHECK-NEXT: add a3, sp, a3 -; CHECK-NEXT: addi a3, a3, 16 -; CHECK-NEXT: vs8r.v v24, (a3) # vscale x 64-byte Folded Spill -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v28, v0.t -; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfadd.vv v16, v24, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB24_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB24_2: -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8, v0.t -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: add a0, sp, a0 -; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload -; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v16, v0.t -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfadd.vv v16, v16, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 -; CHECK-NEXT: add sp, sp, a0 -; CHECK-NEXT: 
.cfi_def_cfa sp, 16 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: .cfi_def_cfa_offset 0 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vf_nxv32bf16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: addi sp, sp, -16 +; ZVFH-NEXT: .cfi_def_cfa_offset 16 +; ZVFH-NEXT: csrr a1, vlenb +; ZVFH-NEXT: slli a1, a1, 4 +; ZVFH-NEXT: sub sp, sp, a1 +; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb +; ZVFH-NEXT: vsetvli a1, zero, e16, m8, ta, ma +; ZVFH-NEXT: vmv1r.v v7, v0 +; ZVFH-NEXT: fmv.x.h a1, fa0 +; ZVFH-NEXT: csrr a2, vlenb +; ZVFH-NEXT: vmv.v.x v24, a1 +; ZVFH-NEXT: slli a1, a2, 1 +; ZVFH-NEXT: srli a2, a2, 2 +; ZVFH-NEXT: sub a3, a0, a1 +; ZVFH-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFH-NEXT: vslidedown.vx v0, v0, a2 +; ZVFH-NEXT: sltu a2, a0, a3 +; ZVFH-NEXT: addi a2, a2, -1 +; ZVFH-NEXT: and a2, a2, a3 +; ZVFH-NEXT: csrr a3, vlenb +; ZVFH-NEXT: slli a3, a3, 3 +; ZVFH-NEXT: add a3, sp, a3 +; ZVFH-NEXT: addi a3, a3, 16 +; ZVFH-NEXT: vs8r.v v24, (a3) # vscale x 64-byte Folded Spill +; ZVFH-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v28, v0.t +; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v12, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFH-NEXT: vfadd.vv v16, v24, v16, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t +; ZVFH-NEXT: bltu a0, a1, .LBB24_2 +; ZVFH-NEXT: # %bb.1: +; ZVFH-NEXT: mv a0, a1 +; ZVFH-NEXT: .LBB24_2: +; ZVFH-NEXT: vmv1r.v v0, v7 +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v8, v0.t +; ZVFH-NEXT: addi a0, sp, 16 +; ZVFH-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill +; ZVFH-NEXT: csrr a0, vlenb +; ZVFH-NEXT: slli a0, a0, 3 +; ZVFH-NEXT: add a0, sp, a0 +; ZVFH-NEXT: addi a0, a0, 16 +; ZVFH-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v16, v0.t +; ZVFH-NEXT: addi a0, sp, 16 +; ZVFH-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFH-NEXT: vfadd.vv v16, v16, v24, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t +; ZVFH-NEXT: csrr a0, vlenb +; ZVFH-NEXT: slli a0, a0, 4 +; ZVFH-NEXT: add sp, sp, a0 +; ZVFH-NEXT: .cfi_def_cfa sp, 16 +; ZVFH-NEXT: addi sp, sp, 16 +; ZVFH-NEXT: .cfi_def_cfa_offset 0 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vf_nxv32bf16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 4 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m8, ta, ma +; ZVFHMIN-NEXT: vmv1r.v v7, v0 +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: vmv.v.x v24, a1 +; ZVFHMIN-NEXT: slli a1, a2, 1 +; ZVFHMIN-NEXT: srli a2, a2, 2 +; ZVFHMIN-NEXT: sub a3, a0, a1 +; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2 +; ZVFHMIN-NEXT: sltu a2, a0, a3 +; ZVFHMIN-NEXT: addi a2, a2, -1 +; ZVFHMIN-NEXT: and a2, a2, a3 +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: slli a3, a3, 3 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a3) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, 
v28, v0.t +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v12, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v16, v24, v16, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t +; ZVFHMIN-NEXT: bltu a0, a1, .LBB24_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a0, a1 +; ZVFHMIN-NEXT: .LBB24_2: +; ZVFHMIN-NEXT: vmv1r.v v0, v7 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v8, v0.t +; ZVFHMIN-NEXT: addi a0, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v16, v0.t +; ZVFHMIN-NEXT: addi a0, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v16, v16, v24, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 4 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv32bf16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: addi sp, sp, -16 +; ZVFBFA-NEXT: .cfi_def_cfa_offset 16 +; ZVFBFA-NEXT: csrr a1, vlenb +; ZVFBFA-NEXT: slli a1, a1, 4 +; ZVFBFA-NEXT: sub sp, sp, a1 +; ZVFBFA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb +; ZVFBFA-NEXT: vsetvli a1, zero, e16, m8, ta, ma +; ZVFBFA-NEXT: vmv1r.v v7, v0 +; ZVFBFA-NEXT: fmv.x.h a1, fa0 +; ZVFBFA-NEXT: csrr a2, vlenb +; ZVFBFA-NEXT: vmv.v.x v24, a1 +; ZVFBFA-NEXT: slli a1, a2, 1 +; ZVFBFA-NEXT: srli a2, a2, 2 +; ZVFBFA-NEXT: sub a3, a0, a1 +; ZVFBFA-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFBFA-NEXT: vslidedown.vx v0, v0, a2 +; ZVFBFA-NEXT: sltu a2, a0, a3 +; ZVFBFA-NEXT: addi a2, a2, -1 +; ZVFBFA-NEXT: and a2, a2, a3 +; ZVFBFA-NEXT: csrr a3, vlenb +; ZVFBFA-NEXT: slli a3, a3, 3 +; ZVFBFA-NEXT: add a3, sp, a3 +; ZVFBFA-NEXT: addi a3, a3, 16 +; ZVFBFA-NEXT: vs8r.v v24, (a3) # vscale x 64-byte Folded Spill +; ZVFBFA-NEXT: vsetvli zero, a2, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v28, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v12, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v24, v16, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v12, v16, v0.t +; ZVFBFA-NEXT: bltu a0, a1, .LBB24_2 +; ZVFBFA-NEXT: # %bb.1: +; ZVFBFA-NEXT: mv a0, a1 +; ZVFBFA-NEXT: .LBB24_2: +; ZVFBFA-NEXT: vmv1r.v v0, v7 +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8, v0.t +; ZVFBFA-NEXT: addi a0, sp, 16 +; ZVFBFA-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill +; ZVFBFA-NEXT: csrr a0, vlenb +; ZVFBFA-NEXT: slli a0, a0, 3 +; ZVFBFA-NEXT: add a0, sp, a0 +; ZVFBFA-NEXT: addi a0, a0, 16 +; ZVFBFA-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v16, v0.t +; ZVFBFA-NEXT: addi a0, sp, 16 +; ZVFBFA-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v16, v24, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma +; 
ZVFBFA-NEXT: vfncvt.f.f.w v8, v16, v0.t +; ZVFBFA-NEXT: csrr a0, vlenb +; ZVFBFA-NEXT: slli a0, a0, 4 +; ZVFBFA-NEXT: add sp, sp, a0 +; ZVFBFA-NEXT: .cfi_def_cfa sp, 16 +; ZVFBFA-NEXT: addi sp, sp, 16 +; ZVFBFA-NEXT: .cfi_def_cfa_offset 0 +; ZVFBFA-NEXT: ret %elt.head = insertelement <vscale x 32 x bfloat> poison, bfloat %b, i32 0 %vb = shufflevector <vscale x 32 x bfloat> %elt.head, <vscale x 32 x bfloat> poison, <vscale x 32 x i32> zeroinitializer %v = call <vscale x 32 x bfloat> @llvm.vp.fadd.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %vb, <vscale x 32 x i1> %m, i32 %evl) @@ -577,56 +1432,158 @@ define <vscale x 32 x bfloat> @vfadd_vf_nxv32bf16(<vscale x 32 x bfloat> %va, bf } define <vscale x 32 x bfloat> @vfadd_vf_nxv32bf16_unmasked(<vscale x 32 x bfloat> %va, bfloat %b, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vf_nxv32bf16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 3 -; CHECK-NEXT: sub sp, sp, a1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb -; CHECK-NEXT: fmv.x.h a1, fa0 -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: vsetvli a3, zero, e16, m8, ta, ma -; CHECK-NEXT: vmset.m v24 -; CHECK-NEXT: vmv.v.x v16, a1 -; CHECK-NEXT: slli a1, a2, 1 -; CHECK-NEXT: srli a2, a2, 2 -; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma -; CHECK-NEXT: vslidedown.vx v0, v24, a2 -; CHECK-NEXT: sltu a2, a0, a3 -; CHECK-NEXT: addi a2, a2, -1 -; CHECK-NEXT: and a2, a2, a3 -; CHECK-NEXT: addi a3, sp, 16 -; CHECK-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v20, v0.t -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfadd.vv v16, v16, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB25_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB25_2: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8 -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload -; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v0 -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfadd.vv v16, v16, v24 -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16 -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: add sp, sp, a0 -; CHECK-NEXT: .cfi_def_cfa sp, 16 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: .cfi_def_cfa_offset 0 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vf_nxv32bf16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: addi sp, sp, -16 +; ZVFH-NEXT: .cfi_def_cfa_offset 16 +; ZVFH-NEXT: csrr a1, vlenb +; ZVFH-NEXT: slli a1, a1, 3 +; ZVFH-NEXT: sub sp, sp, a1 +; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFH-NEXT: fmv.x.h a1, fa0 +; ZVFH-NEXT: csrr a2, vlenb +; ZVFH-NEXT: vsetvli a3, zero, e16, m8, ta, ma +; ZVFH-NEXT: vmset.m v24 +; ZVFH-NEXT: vmv.v.x v16, a1 +; ZVFH-NEXT: slli a1, a2, 1 +; ZVFH-NEXT: srli a2, a2, 2 +; ZVFH-NEXT: sub a3, a0, a1 +; ZVFH-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFH-NEXT: vslidedown.vx v0, v24, a2 +; ZVFH-NEXT: sltu a2, a0, a3 +; ZVFH-NEXT: addi a2, a2, -1 +; ZVFH-NEXT: and 
a2, a2, a3 +; ZVFH-NEXT: addi a3, sp, 16 +; ZVFH-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill +; ZVFH-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v20, v0.t +; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFH-NEXT: vfadd.vv v16, v16, v24, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t +; ZVFH-NEXT: bltu a0, a1, .LBB25_2 +; ZVFH-NEXT: # %bb.1: +; ZVFH-NEXT: mv a0, a1 +; ZVFH-NEXT: .LBB25_2: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v8 +; ZVFH-NEXT: addi a0, sp, 16 +; ZVFH-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload +; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v0 +; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFH-NEXT: vfadd.vv v16, v16, v24 +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16 +; ZVFH-NEXT: csrr a0, vlenb +; ZVFH-NEXT: slli a0, a0, 3 +; ZVFH-NEXT: add sp, sp, a0 +; ZVFH-NEXT: .cfi_def_cfa sp, 16 +; ZVFH-NEXT: addi sp, sp, 16 +; ZVFH-NEXT: .cfi_def_cfa_offset 0 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vf_nxv32bf16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m8, ta, ma +; ZVFHMIN-NEXT: vmset.m v24 +; ZVFHMIN-NEXT: vmv.v.x v16, a1 +; ZVFHMIN-NEXT: slli a1, a2, 1 +; ZVFHMIN-NEXT: srli a2, a2, 2 +; ZVFHMIN-NEXT: sub a3, a0, a1 +; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2 +; ZVFHMIN-NEXT: sltu a2, a0, a3 +; ZVFHMIN-NEXT: addi a2, a2, -1 +; ZVFHMIN-NEXT: and a2, a2, a3 +; ZVFHMIN-NEXT: addi a3, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v20, v0.t +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v16, v16, v24, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t +; ZVFHMIN-NEXT: bltu a0, a1, .LBB25_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a0, a1 +; ZVFHMIN-NEXT: .LBB25_2: +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v8 +; ZVFHMIN-NEXT: addi a0, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v0 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v16, v16, v24 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv32bf16_unmasked: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: addi sp, sp, -16 +; ZVFBFA-NEXT: .cfi_def_cfa_offset 16 +; ZVFBFA-NEXT: csrr a1, vlenb +; ZVFBFA-NEXT: slli a1, a1, 3 +; ZVFBFA-NEXT: sub sp, sp, a1 +; ZVFBFA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 
# sp + 16 + 8 * vlenb +; ZVFBFA-NEXT: fmv.x.h a1, fa0 +; ZVFBFA-NEXT: csrr a2, vlenb +; ZVFBFA-NEXT: vsetvli a3, zero, e16, m8, ta, ma +; ZVFBFA-NEXT: vmset.m v24 +; ZVFBFA-NEXT: vmv.v.x v16, a1 +; ZVFBFA-NEXT: slli a1, a2, 1 +; ZVFBFA-NEXT: srli a2, a2, 2 +; ZVFBFA-NEXT: sub a3, a0, a1 +; ZVFBFA-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFBFA-NEXT: vslidedown.vx v0, v24, a2 +; ZVFBFA-NEXT: sltu a2, a0, a3 +; ZVFBFA-NEXT: addi a2, a2, -1 +; ZVFBFA-NEXT: and a2, a2, a3 +; ZVFBFA-NEXT: addi a3, sp, 16 +; ZVFBFA-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill +; ZVFBFA-NEXT: vsetvli zero, a2, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v20, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v16, v24, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v12, v16, v0.t +; ZVFBFA-NEXT: bltu a0, a1, .LBB25_2 +; ZVFBFA-NEXT: # %bb.1: +; ZVFBFA-NEXT: mv a0, a1 +; ZVFBFA-NEXT: .LBB25_2: +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFBFA-NEXT: addi a0, sp, 16 +; ZVFBFA-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload +; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v0 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v16, v24 +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16 +; ZVFBFA-NEXT: csrr a0, vlenb +; ZVFBFA-NEXT: slli a0, a0, 3 +; ZVFBFA-NEXT: add sp, sp, a0 +; ZVFBFA-NEXT: .cfi_def_cfa sp, 16 +; ZVFBFA-NEXT: addi sp, sp, 16 +; ZVFBFA-NEXT: .cfi_def_cfa_offset 0 +; ZVFBFA-NEXT: ret %elt.head = insertelement <vscale x 32 x bfloat> poison, bfloat %b, i32 0 %vb = shufflevector <vscale x 32 x bfloat> %elt.head, <vscale x 32 x bfloat> poison, <vscale x 32 x i32> zeroinitializer %v = call <vscale x 32 x bfloat> @llvm.vp.fadd.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl) @@ -651,6 +1608,17 @@ define <vscale x 1 x half> @vfadd_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9, v0.t ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv1f16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma +; ZVFBFA-NEXT: vfadd.vv v9, v9, v10, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16, mf4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9, v0.t +; ZVFBFA-NEXT: ret %v = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %evl) ret <vscale x 1 x half> %v } @@ -672,6 +1640,17 @@ define <vscale x 1 x half> @vfadd_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, < ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9 ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv1f16_unmasked: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9 +; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma +; ZVFBFA-NEXT: vfadd.vv v9, v9, v10 +; ZVFBFA-NEXT: vsetvli zero, zero, e16, mf4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9 +; ZVFBFA-NEXT: ret %v = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl) ret <vscale x 1 x 
half> %v } @@ -695,6 +1674,19 @@ define <vscale x 1 x half> @vfadd_vf_nxv1f16(<vscale x 1 x half> %va, half %b, < ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9, v0.t ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv1f16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: fmv.x.w a1, fa0 +; ZVFBFA-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; ZVFBFA-NEXT: vmv.v.x v9, a1 +; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma +; ZVFBFA-NEXT: vfadd.vv v9, v10, v8, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16, mf4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9, v0.t +; ZVFBFA-NEXT: ret %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer %v = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl) @@ -720,6 +1712,19 @@ define <vscale x 1 x half> @vfadd_vf_nxv1f16_commute(<vscale x 1 x half> %va, ha ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9, v0.t ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv1f16_commute: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: fmv.x.w a1, fa0 +; ZVFBFA-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; ZVFBFA-NEXT: vmv.v.x v9, a1 +; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma +; ZVFBFA-NEXT: vfadd.vv v9, v8, v10, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16, mf4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9, v0.t +; ZVFBFA-NEXT: ret %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer %v = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl) @@ -745,6 +1750,19 @@ define <vscale x 1 x half> @vfadd_vf_nxv1f16_unmasked(<vscale x 1 x half> %va, h ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9 ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv1f16_unmasked: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: fmv.x.w a1, fa0 +; ZVFBFA-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; ZVFBFA-NEXT: vmv.v.x v9, a1 +; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8 +; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma +; ZVFBFA-NEXT: vfadd.vv v9, v10, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e16, mf4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9 +; ZVFBFA-NEXT: ret %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer %v = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl) @@ -770,6 +1788,19 @@ define <vscale x 1 x half> @vfadd_vf_nxv1f16_unmasked_commute(<vscale x 1 x half ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9 ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv1f16_unmasked_commute: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: fmv.x.w a1, fa0 +; ZVFBFA-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; ZVFBFA-NEXT: vmv.v.x v9, a1 +; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8 +; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma +; 
ZVFBFA-NEXT: vfadd.vv v9, v8, v10 +; ZVFBFA-NEXT: vsetvli zero, zero, e16, mf4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9 +; ZVFBFA-NEXT: ret %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer %v = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl) @@ -795,6 +1826,17 @@ define <vscale x 2 x half> @vfadd_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9, v0.t ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv2f16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; ZVFBFA-NEXT: vfadd.vv v9, v9, v10, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9, v0.t +; ZVFBFA-NEXT: ret %v = call <vscale x 2 x half> @llvm.vp.fadd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 %evl) ret <vscale x 2 x half> %v } @@ -816,6 +1858,17 @@ define <vscale x 2 x half> @vfadd_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, < ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9 ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv2f16_unmasked: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9 +; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; ZVFBFA-NEXT: vfadd.vv v9, v9, v10 +; ZVFBFA-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9 +; ZVFBFA-NEXT: ret %v = call <vscale x 2 x half> @llvm.vp.fadd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl) ret <vscale x 2 x half> %v } @@ -839,6 +1892,19 @@ define <vscale x 2 x half> @vfadd_vf_nxv2f16(<vscale x 2 x half> %va, half %b, < ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9, v0.t ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv2f16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: fmv.x.w a1, fa0 +; ZVFBFA-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFBFA-NEXT: vmv.v.x v9, a1 +; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; ZVFBFA-NEXT: vfadd.vv v9, v10, v8, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9, v0.t +; ZVFBFA-NEXT: ret %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer %v = call <vscale x 2 x half> @llvm.vp.fadd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl) @@ -864,6 +1930,19 @@ define <vscale x 2 x half> @vfadd_vf_nxv2f16_unmasked(<vscale x 2 x half> %va, h ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9 ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv2f16_unmasked: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: fmv.x.w a1, fa0 +; ZVFBFA-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFBFA-NEXT: vmv.v.x v9, a1 +; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8 +; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; 
ZVFBFA-NEXT: vfadd.vv v9, v10, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9 +; ZVFBFA-NEXT: ret %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer %v = call <vscale x 2 x half> @llvm.vp.fadd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl) @@ -889,6 +1968,17 @@ define <vscale x 4 x half> @vfadd_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10, v0.t ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv4f16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m2, ta, ma +; ZVFBFA-NEXT: vfadd.vv v10, v12, v10, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16, m1, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v10, v0.t +; ZVFBFA-NEXT: ret %v = call <vscale x 4 x half> @llvm.vp.fadd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 %evl) ret <vscale x 4 x half> %v } @@ -910,6 +2000,17 @@ define <vscale x 4 x half> @vfadd_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, < ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10 ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv4f16_unmasked: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9 +; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m2, ta, ma +; ZVFBFA-NEXT: vfadd.vv v10, v12, v10 +; ZVFBFA-NEXT: vsetvli zero, zero, e16, m1, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v10 +; ZVFBFA-NEXT: ret %v = call <vscale x 4 x half> @llvm.vp.fadd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl) ret <vscale x 4 x half> %v } @@ -933,6 +2034,19 @@ define <vscale x 4 x half> @vfadd_vf_nxv4f16(<vscale x 4 x half> %va, half %b, < ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10, v0.t ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv4f16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: fmv.x.w a1, fa0 +; ZVFBFA-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFBFA-NEXT: vmv.v.x v12, a1 +; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v12, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m2, ta, ma +; ZVFBFA-NEXT: vfadd.vv v10, v10, v8, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16, m1, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v10, v0.t +; ZVFBFA-NEXT: ret %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer %v = call <vscale x 4 x half> @llvm.vp.fadd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl) @@ -958,6 +2072,19 @@ define <vscale x 4 x half> @vfadd_vf_nxv4f16_unmasked(<vscale x 4 x half> %va, h ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10 ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv4f16_unmasked: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: fmv.x.w a1, fa0 +; ZVFBFA-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFBFA-NEXT: vmv.v.x v12, a1 +; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8 +; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v12 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m2, 
ta, ma +; ZVFBFA-NEXT: vfadd.vv v10, v10, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e16, m1, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v10 +; ZVFBFA-NEXT: ret %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer %v = call <vscale x 4 x half> @llvm.vp.fadd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl) @@ -983,6 +2110,17 @@ define <vscale x 8 x half> @vfadd_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12, v0.t ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv8f16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v10, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFBFA-NEXT: vfadd.vv v12, v16, v12, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16, m2, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12, v0.t +; ZVFBFA-NEXT: ret %v = call <vscale x 8 x half> @llvm.vp.fadd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 %evl) ret <vscale x 8 x half> %v } @@ -1004,6 +2142,17 @@ define <vscale x 8 x half> @vfadd_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, < ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12 ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv8f16_unmasked: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v10 +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFBFA-NEXT: vfadd.vv v12, v16, v12 +; ZVFBFA-NEXT: vsetvli zero, zero, e16, m2, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12 +; ZVFBFA-NEXT: ret %v = call <vscale x 8 x half> @llvm.vp.fadd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl) ret <vscale x 8 x half> %v } @@ -1027,6 +2176,19 @@ define <vscale x 8 x half> @vfadd_vf_nxv8f16(<vscale x 8 x half> %va, half %b, < ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12, v0.t ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv8f16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: fmv.x.w a1, fa0 +; ZVFBFA-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFBFA-NEXT: vmv.v.x v16, a1 +; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v16, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFBFA-NEXT: vfadd.vv v12, v12, v8, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16, m2, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12, v0.t +; ZVFBFA-NEXT: ret %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer %v = call <vscale x 8 x half> @llvm.vp.fadd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl) @@ -1052,6 +2214,19 @@ define <vscale x 8 x half> @vfadd_vf_nxv8f16_unmasked(<vscale x 8 x half> %va, h ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12 ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv8f16_unmasked: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: fmv.x.w a1, fa0 +; ZVFBFA-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFBFA-NEXT: vmv.v.x v16, a1 +; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8 +; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v16 +; ZVFBFA-NEXT: vsetvli zero, 
zero, e32, m4, ta, ma +; ZVFBFA-NEXT: vfadd.vv v12, v12, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e16, m2, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12 +; ZVFBFA-NEXT: ret %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer %v = call <vscale x 8 x half> @llvm.vp.fadd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl) @@ -1077,6 +2252,17 @@ define <vscale x 16 x half> @vfadd_vv_nxv16f16(<vscale x 16 x half> %va, <vscale ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16, v0.t ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv16f16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v8, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v24, v16, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16, v0.t +; ZVFBFA-NEXT: ret %v = call <vscale x 16 x half> @llvm.vp.fadd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl) ret <vscale x 16 x half> %v } @@ -1098,6 +2284,17 @@ define <vscale x 16 x half> @vfadd_vv_nxv16f16_unmasked(<vscale x 16 x half> %va ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv16f16_unmasked: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v24, v16 +; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16 +; ZVFBFA-NEXT: ret %v = call <vscale x 16 x half> @llvm.vp.fadd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl) ret <vscale x 16 x half> %v } @@ -1121,6 +2318,19 @@ define <vscale x 16 x half> @vfadd_vf_nxv16f16(<vscale x 16 x half> %va, half %b ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16, v0.t ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv16f16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: fmv.x.w a1, fa0 +; ZVFBFA-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFBFA-NEXT: vmv.v.x v24, a1 +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v24, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v16, v8, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16, v0.t +; ZVFBFA-NEXT: ret %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer %v = call <vscale x 16 x half> @llvm.vp.fadd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl) @@ -1146,6 +2356,19 @@ define <vscale x 16 x half> @vfadd_vf_nxv16f16_unmasked(<vscale x 16 x half> %va ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv16f16_unmasked: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: fmv.x.w a1, fa0 +; ZVFBFA-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFBFA-NEXT: vmv.v.x v24, a1 +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFBFA-NEXT: 
vfwcvt.f.f.v v8, v24 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v16, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16 +; ZVFBFA-NEXT: ret %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer %v = call <vscale x 16 x half> @llvm.vp.fadd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl) @@ -1209,6 +2432,55 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscale ; ZVFHMIN-NEXT: addi sp, sp, 16 ; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv32f16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: addi sp, sp, -16 +; ZVFBFA-NEXT: .cfi_def_cfa_offset 16 +; ZVFBFA-NEXT: csrr a1, vlenb +; ZVFBFA-NEXT: slli a1, a1, 3 +; ZVFBFA-NEXT: sub sp, sp, a1 +; ZVFBFA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFBFA-NEXT: vsetvli a1, zero, e8, mf2, ta, ma +; ZVFBFA-NEXT: vmv1r.v v7, v0 +; ZVFBFA-NEXT: csrr a2, vlenb +; ZVFBFA-NEXT: slli a1, a2, 1 +; ZVFBFA-NEXT: srli a2, a2, 2 +; ZVFBFA-NEXT: sub a3, a0, a1 +; ZVFBFA-NEXT: vslidedown.vx v0, v0, a2 +; ZVFBFA-NEXT: sltu a2, a0, a3 +; ZVFBFA-NEXT: addi a2, a2, -1 +; ZVFBFA-NEXT: and a2, a2, a3 +; ZVFBFA-NEXT: addi a3, sp, 16 +; ZVFBFA-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill +; ZVFBFA-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v20, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v16, v24, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v12, v16, v0.t +; ZVFBFA-NEXT: bltu a0, a1, .LBB48_2 +; ZVFBFA-NEXT: # %bb.1: +; ZVFBFA-NEXT: mv a0, a1 +; ZVFBFA-NEXT: .LBB48_2: +; ZVFBFA-NEXT: vmv1r.v v0, v7 +; ZVFBFA-NEXT: addi a1, sp, 16 +; ZVFBFA-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload +; ZVFBFA-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v24, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v8, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v24, v16, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16, v0.t +; ZVFBFA-NEXT: csrr a0, vlenb +; ZVFBFA-NEXT: slli a0, a0, 3 +; ZVFBFA-NEXT: add sp, sp, a0 +; ZVFBFA-NEXT: .cfi_def_cfa sp, 16 +; ZVFBFA-NEXT: addi sp, sp, 16 +; ZVFBFA-NEXT: .cfi_def_cfa_offset 0 +; ZVFBFA-NEXT: ret %v = call <vscale x 32 x half> @llvm.vp.fadd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl) ret <vscale x 32 x half> %v } @@ -1268,6 +2540,55 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16_unmasked(<vscale x 32 x half> %va ; ZVFHMIN-NEXT: addi sp, sp, 16 ; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv32f16_unmasked: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: addi sp, sp, -16 +; ZVFBFA-NEXT: .cfi_def_cfa_offset 16 +; ZVFBFA-NEXT: csrr a1, vlenb +; ZVFBFA-NEXT: slli a1, a1, 3 +; ZVFBFA-NEXT: sub sp, sp, a1 +; ZVFBFA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFBFA-NEXT: csrr a2, vlenb +; ZVFBFA-NEXT: vsetvli a1, zero, e8, m4, ta, ma +; ZVFBFA-NEXT: vmset.m v24 +; ZVFBFA-NEXT: slli a1, a2, 1 +; 
ZVFBFA-NEXT: srli a2, a2, 2 +; ZVFBFA-NEXT: sub a3, a0, a1 +; ZVFBFA-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFBFA-NEXT: vslidedown.vx v0, v24, a2 +; ZVFBFA-NEXT: sltu a2, a0, a3 +; ZVFBFA-NEXT: addi a2, a2, -1 +; ZVFBFA-NEXT: and a2, a2, a3 +; ZVFBFA-NEXT: addi a3, sp, 16 +; ZVFBFA-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill +; ZVFBFA-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v20, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v16, v24, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v12, v16, v0.t +; ZVFBFA-NEXT: bltu a0, a1, .LBB49_2 +; ZVFBFA-NEXT: # %bb.1: +; ZVFBFA-NEXT: mv a0, a1 +; ZVFBFA-NEXT: .LBB49_2: +; ZVFBFA-NEXT: addi a1, sp, 16 +; ZVFBFA-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload +; ZVFBFA-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v24 +; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v24, v16 +; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16 +; ZVFBFA-NEXT: csrr a0, vlenb +; ZVFBFA-NEXT: slli a0, a0, 3 +; ZVFBFA-NEXT: add sp, sp, a0 +; ZVFBFA-NEXT: .cfi_def_cfa sp, 16 +; ZVFBFA-NEXT: addi sp, sp, 16 +; ZVFBFA-NEXT: .cfi_def_cfa_offset 0 +; ZVFBFA-NEXT: ret %v = call <vscale x 32 x half> @llvm.vp.fadd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl) ret <vscale x 32 x half> %v } @@ -1340,6 +2661,68 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16(<vscale x 32 x half> %va, half %b ; ZVFHMIN-NEXT: addi sp, sp, 16 ; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv32f16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: addi sp, sp, -16 +; ZVFBFA-NEXT: .cfi_def_cfa_offset 16 +; ZVFBFA-NEXT: csrr a1, vlenb +; ZVFBFA-NEXT: slli a1, a1, 4 +; ZVFBFA-NEXT: sub sp, sp, a1 +; ZVFBFA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb +; ZVFBFA-NEXT: vsetvli a1, zero, e16, m8, ta, ma +; ZVFBFA-NEXT: vmv1r.v v7, v0 +; ZVFBFA-NEXT: fmv.x.w a1, fa0 +; ZVFBFA-NEXT: csrr a2, vlenb +; ZVFBFA-NEXT: vmv.v.x v24, a1 +; ZVFBFA-NEXT: slli a1, a2, 1 +; ZVFBFA-NEXT: srli a2, a2, 2 +; ZVFBFA-NEXT: sub a3, a0, a1 +; ZVFBFA-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFBFA-NEXT: vslidedown.vx v0, v0, a2 +; ZVFBFA-NEXT: sltu a2, a0, a3 +; ZVFBFA-NEXT: addi a2, a2, -1 +; ZVFBFA-NEXT: and a2, a2, a3 +; ZVFBFA-NEXT: csrr a3, vlenb +; ZVFBFA-NEXT: slli a3, a3, 3 +; ZVFBFA-NEXT: add a3, sp, a3 +; ZVFBFA-NEXT: addi a3, a3, 16 +; ZVFBFA-NEXT: vs8r.v v24, (a3) # vscale x 64-byte Folded Spill +; ZVFBFA-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v28, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v12, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v24, v16, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v12, v16, v0.t +; ZVFBFA-NEXT: bltu a0, a1, .LBB50_2 +; ZVFBFA-NEXT: # %bb.1: +; ZVFBFA-NEXT: mv a0, a1 +; ZVFBFA-NEXT: .LBB50_2: +; ZVFBFA-NEXT: vmv1r.v v0, v7 +; ZVFBFA-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8, v0.t +; ZVFBFA-NEXT: addi a0, sp, 16 +; ZVFBFA-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill +; ZVFBFA-NEXT: csrr a0, vlenb +; ZVFBFA-NEXT: slli a0, a0, 3 +; ZVFBFA-NEXT: add a0, sp, a0 
+; ZVFBFA-NEXT: addi a0, a0, 16 +; ZVFBFA-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v16, v0.t +; ZVFBFA-NEXT: addi a0, sp, 16 +; ZVFBFA-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v16, v24, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16, v0.t +; ZVFBFA-NEXT: csrr a0, vlenb +; ZVFBFA-NEXT: slli a0, a0, 4 +; ZVFBFA-NEXT: add sp, sp, a0 +; ZVFBFA-NEXT: .cfi_def_cfa sp, 16 +; ZVFBFA-NEXT: addi sp, sp, 16 +; ZVFBFA-NEXT: .cfi_def_cfa_offset 0 +; ZVFBFA-NEXT: ret %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer %v = call <vscale x 32 x half> @llvm.vp.fadd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl) @@ -1403,6 +2786,57 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16_unmasked(<vscale x 32 x half> %va ; ZVFHMIN-NEXT: addi sp, sp, 16 ; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 ; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv32f16_unmasked: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: addi sp, sp, -16 +; ZVFBFA-NEXT: .cfi_def_cfa_offset 16 +; ZVFBFA-NEXT: csrr a1, vlenb +; ZVFBFA-NEXT: slli a1, a1, 3 +; ZVFBFA-NEXT: sub sp, sp, a1 +; ZVFBFA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFBFA-NEXT: fmv.x.w a1, fa0 +; ZVFBFA-NEXT: csrr a2, vlenb +; ZVFBFA-NEXT: vsetvli a3, zero, e16, m8, ta, ma +; ZVFBFA-NEXT: vmset.m v24 +; ZVFBFA-NEXT: vmv.v.x v16, a1 +; ZVFBFA-NEXT: slli a1, a2, 1 +; ZVFBFA-NEXT: srli a2, a2, 2 +; ZVFBFA-NEXT: sub a3, a0, a1 +; ZVFBFA-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFBFA-NEXT: vslidedown.vx v0, v24, a2 +; ZVFBFA-NEXT: sltu a2, a0, a3 +; ZVFBFA-NEXT: addi a2, a2, -1 +; ZVFBFA-NEXT: and a2, a2, a3 +; ZVFBFA-NEXT: addi a3, sp, 16 +; ZVFBFA-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill +; ZVFBFA-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v20, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v16, v24, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v12, v16, v0.t +; ZVFBFA-NEXT: bltu a0, a1, .LBB51_2 +; ZVFBFA-NEXT: # %bb.1: +; ZVFBFA-NEXT: mv a0, a1 +; ZVFBFA-NEXT: .LBB51_2: +; ZVFBFA-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFBFA-NEXT: addi a0, sp, 16 +; ZVFBFA-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload +; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v0 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v16, v24 +; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16 +; ZVFBFA-NEXT: csrr a0, vlenb +; ZVFBFA-NEXT: slli a0, a0, 3 +; ZVFBFA-NEXT: add sp, sp, a0 +; ZVFBFA-NEXT: .cfi_def_cfa sp, 16 +; ZVFBFA-NEXT: addi sp, sp, 16 +; ZVFBFA-NEXT: .cfi_def_cfa_offset 0 +; ZVFBFA-NEXT: ret %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer %v = call <vscale x 32 x half> @llvm.vp.fadd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl) diff --git a/llvm/test/Transforms/ADCE/2016-09-06.ll 
b/llvm/test/Transforms/ADCE/2016-09-06.ll index 850f412..1329ac6 100644 --- a/llvm/test/Transforms/ADCE/2016-09-06.ll +++ b/llvm/test/Transforms/ADCE/2016-09-06.ll @@ -5,7 +5,7 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-unknown-linux-gnu" ; Function Attrs: nounwind uwtable -define i32 @foo(i32, i32, i32) #0 { +define i32 @foo(i32, i32, i32) { %4 = alloca i32, align 4 %5 = alloca i32, align 4 %6 = alloca i32, align 4 @@ -48,8 +48,6 @@ B21: ret i32 %I22 } -attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } - !llvm.ident = !{!0} !0 = !{!"clang version 4.0.0"} diff --git a/llvm/test/Transforms/ADCE/blocks-with-dead-term-nondeterministic.ll b/llvm/test/Transforms/ADCE/blocks-with-dead-term-nondeterministic.ll index 9708be9..5e844b4 100644 --- a/llvm/test/Transforms/ADCE/blocks-with-dead-term-nondeterministic.ll +++ b/llvm/test/Transforms/ADCE/blocks-with-dead-term-nondeterministic.ll @@ -5,7 +5,7 @@ target triple = "x86_64-apple-macosx10.10.0" ; CHECK: uselistorder label %bb16, { 1, 0 } ; Function Attrs: noinline nounwind ssp uwtable -define void @ham(i1 %arg) local_unnamed_addr #0 { +define void @ham(i1 %arg) local_unnamed_addr { bb: br i1 false, label %bb1, label %bb22 @@ -64,8 +64,6 @@ bb22: ; preds = %bb21, %bb ret void } -attributes #0 = { noinline nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="core2" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } - !llvm.module.flags = !{!0} !0 = !{i32 7, !"PIC Level", i32 2} diff --git a/llvm/test/Transforms/AddDiscriminators/basic.ll b/llvm/test/Transforms/AddDiscriminators/basic.ll index 5186537..fc4c10a 100644 --- a/llvm/test/Transforms/AddDiscriminators/basic.ll +++ b/llvm/test/Transforms/AddDiscriminators/basic.ll @@ -11,7 +11,7 @@ ; if (i < 10) x = i; ; } -define void @foo(i32 %i) #0 !dbg !4 { +define void @foo(i32 %i) !dbg !4 { entry: %i.addr = alloca i32, align 4 %x = alloca i32, align 4 @@ -35,8 +35,6 @@ if.end: ; preds = %if.then, %entry ; CHECK: ret void, !dbg ![[END:[0-9]+]] } -attributes #0 = { nounwind uwtable noinline optnone "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } - !llvm.dbg.cu = !{!0} !llvm.module.flags = !{!7, !8} !llvm.ident = !{!9} diff --git a/llvm/test/Transforms/AddDiscriminators/call-nested.ll b/llvm/test/Transforms/AddDiscriminators/call-nested.ll index 99340a5..f1373e4 100644 --- a/llvm/test/Transforms/AddDiscriminators/call-nested.ll +++ b/llvm/test/Transforms/AddDiscriminators/call-nested.ll @@ -9,7 +9,7 @@ ; #6 } ; Function Attrs: uwtable -define i32 @_Z3bazv() #0 !dbg !4 { +define i32 @_Z3bazv() !dbg !4 { %1 = call i32 @_Z3barv(), !dbg !11 ; CHECK: %1 = call i32 @_Z3barv(), !dbg 
![[CALL0:[0-9]+]] %2 = call i32 @_Z3barv(), !dbg !12 @@ -19,12 +19,9 @@ define i32 @_Z3bazv() #0 !dbg !4 { ret i32 %3, !dbg !14 } -declare i32 @_Z3fooii(i32, i32) #1 +declare i32 @_Z3fooii(i32, i32) -declare i32 @_Z3barv() #1 - -attributes #0 = { uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #1 = { "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } +declare i32 @_Z3barv() !llvm.dbg.cu = !{!0} !llvm.module.flags = !{!8, !9} diff --git a/llvm/test/Transforms/AddDiscriminators/call.ll b/llvm/test/Transforms/AddDiscriminators/call.ll index 93d3aa4..11b21ef 100644 --- a/llvm/test/Transforms/AddDiscriminators/call.ll +++ b/llvm/test/Transforms/AddDiscriminators/call.ll @@ -8,7 +8,7 @@ ; #5 } ; Function Attrs: uwtable -define void @_Z3foov() #0 !dbg !4 { +define void @_Z3foov() !dbg !4 { call void @_Z3barv(), !dbg !10 ; CHECK: call void @_Z3barv(), !dbg ![[CALL0:[0-9]+]] %a = alloca [100 x i8], align 16 @@ -21,13 +21,10 @@ define void @_Z3foov() #0 !dbg !4 { ret void, !dbg !13 } -declare void @_Z3barv() #1 +declare void @_Z3barv() declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind argmemonly declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind argmemonly -attributes #0 = { uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #1 = { "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" } - !llvm.dbg.cu = !{!0} !llvm.module.flags = !{!7, !8} !llvm.ident = !{!9} diff --git a/llvm/test/Transforms/AddDiscriminators/diamond.ll b/llvm/test/Transforms/AddDiscriminators/diamond.ll index c93a57a..9edcf39 100644 --- a/llvm/test/Transforms/AddDiscriminators/diamond.ll +++ b/llvm/test/Transforms/AddDiscriminators/diamond.ll @@ -12,7 +12,7 @@ ; bar(3): discriminator 2 ; Function Attrs: uwtable -define void @_Z3fooi(i32 %i) #0 !dbg !4 { +define void @_Z3fooi(i32 %i) !dbg !4 { %1 = alloca i32, align 4 store i32 %i, ptr %1, align 4 call void @llvm.dbg.declare(metadata ptr %1, metadata !11, metadata !12), !dbg !13 @@ -34,13 +34,9 @@ define void @_Z3fooi(i32 %i) #0 !dbg !4 { } ; Function Attrs: nounwind readnone -declare void @llvm.dbg.declare(metadata, metadata, metadata) #1 +declare void @llvm.dbg.declare(metadata, metadata, metadata) -declare void @_Z3bari(i32) #2 - -attributes #0 = { uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #1 = { nounwind readnone } 
-attributes #2 = { "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" } +declare void @_Z3bari(i32) !llvm.dbg.cu = !{!0} !llvm.module.flags = !{!8, !9} diff --git a/llvm/test/Transforms/AddDiscriminators/first-only.ll b/llvm/test/Transforms/AddDiscriminators/first-only.ll index 7ae9ed0..415e5f0 100644 --- a/llvm/test/Transforms/AddDiscriminators/first-only.ll +++ b/llvm/test/Transforms/AddDiscriminators/first-only.ll @@ -13,7 +13,7 @@ ; } ; } -define void @foo(i32 %i) #0 !dbg !4 { +define void @foo(i32 %i) !dbg !4 { entry: %i.addr = alloca i32, align 4 %x = alloca i32, align 4 @@ -44,8 +44,6 @@ if.end: ; preds = %if.then, %entry ; CHECK: ret void, !dbg ![[END:[0-9]+]] } -attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } - !llvm.dbg.cu = !{!0} !llvm.module.flags = !{!7, !8} !llvm.ident = !{!9} diff --git a/llvm/test/Transforms/AddDiscriminators/invoke.ll b/llvm/test/Transforms/AddDiscriminators/invoke.ll index d39014d..a3989b6 100644 --- a/llvm/test/Transforms/AddDiscriminators/invoke.ll +++ b/llvm/test/Transforms/AddDiscriminators/invoke.ll @@ -5,14 +5,14 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-apple-macosx10.14.0" ; Function Attrs: ssp uwtable -define void @_Z3foov() #0 personality ptr @__gxx_personality_v0 !dbg !8 { +define void @_Z3foov() personality ptr @__gxx_personality_v0 !dbg !8 { entry: %exn.slot = alloca ptr %ehselector.slot = alloca i32 ; CHECK: call void @_Z12bar_noexceptv({{.*}} !dbg ![[CALL1:[0-9]+]] - call void @_Z12bar_noexceptv() #4, !dbg !11 + call void @_Z12bar_noexceptv(), !dbg !11 ; CHECK: call void @_Z12bar_noexceptv({{.*}} !dbg ![[CALL2:[0-9]+]] - call void @_Z12bar_noexceptv() #4, !dbg !13 + call void @_Z12bar_noexceptv(), !dbg !13 invoke void @_Z3barv() ; CHECK: unwind label {{.*}} !dbg ![[INVOKE:[0-9]+]] to label %invoke.cont unwind label %lpad, !dbg !14 @@ -31,8 +31,8 @@ lpad: ; preds = %entry catch: ; preds = %lpad %exn = load ptr, ptr %exn.slot, align 8, !dbg !15 - %3 = call ptr @__cxa_begin_catch(ptr %exn) #4, !dbg !15 - invoke void @__cxa_rethrow() #5 + %3 = call ptr @__cxa_begin_catch(ptr %exn), !dbg !15 + invoke void @__cxa_rethrow() to label %unreachable unwind label %lpad1, !dbg !17 lpad1: ; preds = %catch @@ -62,7 +62,7 @@ terminate.lpad: ; preds = %lpad1 %7 = landingpad { ptr, i32 } catch ptr null, !dbg !20 %8 = extractvalue { ptr, i32 } %7, 0, !dbg !20 - call void @__clang_call_terminate(ptr %8) #6, !dbg !20 + call void @__clang_call_terminate(ptr %8), !dbg !20 unreachable, !dbg !20 unreachable: ; preds = %catch @@ -70,9 +70,9 @@ unreachable: ; preds = %catch } ; Function Attrs: nounwind -declare void @_Z12bar_noexceptv() #1 +declare void @_Z12bar_noexceptv() -declare void @_Z3barv() #2 +declare void @_Z3barv() declare i32 @__gxx_personality_v0(...) 
@@ -83,22 +83,14 @@ declare void @__cxa_rethrow() declare void @__cxa_end_catch() ; Function Attrs: noinline noreturn nounwind -define linkonce_odr hidden void @__clang_call_terminate(ptr) #3 { - %2 = call ptr @__cxa_begin_catch(ptr %0) #4 - call void @_ZSt9terminatev() #6 +define linkonce_odr hidden void @__clang_call_terminate(ptr) { + %2 = call ptr @__cxa_begin_catch(ptr %0) + call void @_ZSt9terminatev() unreachable } declare void @_ZSt9terminatev() -attributes #0 = { ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #1 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #2 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #3 = { noinline noreturn nounwind } -attributes #4 = { nounwind } -attributes #5 = { noreturn } -attributes #6 = { noreturn nounwind } - !llvm.dbg.cu = !{!0} !llvm.module.flags = !{!3, !4, !5, !6} !llvm.ident = !{!7} diff --git a/llvm/test/Transforms/AddDiscriminators/multiple.ll b/llvm/test/Transforms/AddDiscriminators/multiple.ll index 54c1a5d..8e8ca6a 100644 --- a/llvm/test/Transforms/AddDiscriminators/multiple.ll +++ b/llvm/test/Transforms/AddDiscriminators/multiple.ll @@ -10,7 +10,7 @@ ; The two stores inside the if-then-else line must have different discriminator ; values. -define void @foo(i32 %i) #0 !dbg !4 { +define void @foo(i32 %i) !dbg !4 { entry: %i.addr = alloca i32, align 4 %x = alloca i32, align 4 @@ -45,8 +45,6 @@ if.end: ; preds = %if.else, %if.then ret void, !dbg !12 } -attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } - !llvm.dbg.cu = !{!0} !llvm.module.flags = !{!7, !8} !llvm.ident = !{!9} diff --git a/llvm/test/Transforms/AddDiscriminators/no-discriminators.ll b/llvm/test/Transforms/AddDiscriminators/no-discriminators.ll index c23edd6..f84579b 100644 --- a/llvm/test/Transforms/AddDiscriminators/no-discriminators.ll +++ b/llvm/test/Transforms/AddDiscriminators/no-discriminators.ll @@ -12,7 +12,7 @@ ; altered. If they are, it means that the discriminators pass added a ; new lexical scope. 
-define i32 @foo(i64 %i) #0 !dbg !4 { +define i32 @foo(i64 %i) !dbg !4 { entry: %retval = alloca i32, align 4 %i.addr = alloca i64, align 8 @@ -39,10 +39,7 @@ return: ; preds = %if.else, %if.then } ; Function Attrs: nounwind readnone -declare void @llvm.dbg.declare(metadata, metadata, metadata) #1 - -attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #1 = { nounwind readnone } +declare void @llvm.dbg.declare(metadata, metadata, metadata) ; We should be able to add discriminators even in the absence of llvm.dbg.cu. ; When using sample profiles, the front end will generate line tables but it diff --git a/llvm/test/Transforms/AddDiscriminators/oneline.ll b/llvm/test/Transforms/AddDiscriminators/oneline.ll index 533d547..fc1675b 100644 --- a/llvm/test/Transforms/AddDiscriminators/oneline.ll +++ b/llvm/test/Transforms/AddDiscriminators/oneline.ll @@ -10,7 +10,7 @@ ; return 100: discriminator 4 ; return 99: discriminator 6 -define i32 @_Z3fooi(i32 %i) #0 !dbg !4 { +define i32 @_Z3fooi(i32 %i) !dbg !4 { %1 = alloca i32, align 4 %2 = alloca i32, align 4 store i32 %i, ptr %2, align 4, !tbaa !13 @@ -49,10 +49,7 @@ define i32 @_Z3fooi(i32 %i) #0 !dbg !4 { } ; Function Attrs: nounwind readnone -declare void @llvm.dbg.declare(metadata, metadata, metadata) #1 - -attributes #0 = { nounwind uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #1 = { nounwind readnone } +declare void @llvm.dbg.declare(metadata, metadata, metadata) !llvm.dbg.cu = !{!0} !llvm.module.flags = !{!10, !11} diff --git a/llvm/test/Transforms/Attributor/reduced/register_benchmark_test.ll b/llvm/test/Transforms/Attributor/reduced/register_benchmark_test.ll index eb7d78f..4704238 100644 --- a/llvm/test/Transforms/Attributor/reduced/register_benchmark_test.ll +++ b/llvm/test/Transforms/Attributor/reduced/register_benchmark_test.ll @@ -1557,24 +1557,24 @@ declare dso_local void @_GLOBAL__sub_I_register_benchmark_test.cc() #0 section " ; Function Attrs: cold noreturn nounwind declare void @llvm.trap() #20 -attributes #0 = { uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #1 = { "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #2 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" 
"denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #0 = { uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-jump-tables"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" } +attributes #1 = { "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" } +attributes #2 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" } attributes #3 = { nounwind } -attributes #4 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #4 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-jump-tables"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" } attributes #5 = { argmemonly nounwind willreturn } -attributes #6 = { alwaysinline uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #7 = { alwaysinline nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" 
"no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #8 = { nobuiltin "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #9 = { nobuiltin nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #10 = { inlinehint uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #11 = { inlinehint nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #12 = { noreturn nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #13 = { norecurse uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #6 = { alwaysinline uwtable 
"correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-jump-tables"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" } +attributes #7 = { alwaysinline nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-jump-tables"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" } +attributes #8 = { nobuiltin "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" } +attributes #9 = { nobuiltin nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" } +attributes #10 = { inlinehint uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-jump-tables"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" } +attributes #11 = { inlinehint nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-jump-tables"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" } +attributes #12 = { noreturn nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" } +attributes #13 = { norecurse uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-jump-tables"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" } attributes #14 = { nounwind readnone willreturn } -attributes #15 = { noreturn "correctly-rounded-divide-sqrt-fp-math"="false" 
"denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #15 = { noreturn "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" } attributes #16 = { noinline noreturn nounwind } attributes #17 = { argmemonly nounwind willreturn writeonly } -attributes #18 = { noreturn uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #19 = { inlinehint noreturn uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #18 = { noreturn uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-jump-tables"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" } +attributes #19 = { inlinehint noreturn uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-jump-tables"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" } attributes #20 = { cold noreturn nounwind } diff --git a/llvm/test/Transforms/CodeGenPrepare/ARM/bitreverse-recognize.ll b/llvm/test/Transforms/CodeGenPrepare/ARM/bitreverse-recognize.ll index d272fef..189186b 100644 --- a/llvm/test/Transforms/CodeGenPrepare/ARM/bitreverse-recognize.ll +++ b/llvm/test/Transforms/CodeGenPrepare/ARM/bitreverse-recognize.ll @@ -4,7 +4,7 @@ target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64" target triple = "armv7--linux-gnueabihf" ; CHECK-LABEL: @f -define i32 @f(i32 %a) #0 { +define i32 @f(i32 %a) { ; CHECK: call i32 @llvm.bitreverse.i32 entry: 
br label %for.body @@ -25,8 +25,6 @@ for.body: ; preds = %for.body, %entry br i1 %exitcond, label %for.cond.cleanup, label %for.body, !llvm.loop !3 } -attributes #0 = { norecurse nounwind readnone "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a8" "target-features"="+dsp,+neon,+vfp3" "unsafe-fp-math"="false" "use-soft-float"="false" } - !llvm.module.flags = !{!0, !1} !llvm.ident = !{!2} diff --git a/llvm/test/Transforms/CodeGenPrepare/X86/bitreverse-hang.ll b/llvm/test/Transforms/CodeGenPrepare/X86/bitreverse-hang.ll index eec0967..35115cf 100644 --- a/llvm/test/Transforms/CodeGenPrepare/X86/bitreverse-hang.ll +++ b/llvm/test/Transforms/CodeGenPrepare/X86/bitreverse-hang.ll @@ -21,7 +21,7 @@ @b = common global i32 0, align 4 ; CHECK: define i32 @fn1 -define i32 @fn1() #0 { +define i32 @fn1() { entry: %b.promoted = load i32, ptr @b, align 4, !tbaa !2 br label %for.body @@ -40,8 +40,6 @@ for.end: ; preds = %for.body ret i32 undef } -attributes #0 = { norecurse nounwind ssp uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="core2" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+ssse3" "unsafe-fp-math"="false" "use-soft-float"="false" } - !llvm.module.flags = !{!0} !llvm.ident = !{!1} diff --git a/llvm/test/Transforms/CodeGenPrepare/dom-tree.ll b/llvm/test/Transforms/CodeGenPrepare/dom-tree.ll index 1c990ff..14360fe 100644 --- a/llvm/test/Transforms/CodeGenPrepare/dom-tree.ll +++ b/llvm/test/Transforms/CodeGenPrepare/dom-tree.ll @@ -10,7 +10,7 @@ target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64" target triple = "armv7--linux-gnueabihf" -define i32 @f(i32 %a) #0 { +define i32 @f(i32 %a) { entry: br label %for.body @@ -30,8 +30,6 @@ for.body: br i1 %exitcond, label %for.cond.cleanup, label %for.body, !llvm.loop !3 } -attributes #0 = { norecurse nounwind readnone "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a8" "target-features"="+dsp,+neon,+vfp3" "unsafe-fp-math"="false" "use-soft-float"="false" } - !llvm.module.flags = !{!0, !1} !llvm.ident = !{!2} diff --git a/llvm/test/Transforms/ConstantHoisting/X86/ehpad.ll b/llvm/test/Transforms/ConstantHoisting/X86/ehpad.ll index 46fa066..78760db 100644 --- a/llvm/test/Transforms/ConstantHoisting/X86/ehpad.ll +++ b/llvm/test/Transforms/ConstantHoisting/X86/ehpad.ll @@ -20,7 +20,7 @@ target triple = "x86_64-pc-windows-msvc" ; BFIHOIST: br label %endif ; Function Attrs: norecurse -define i32 @main(i32 %argc, ptr nocapture readnone %argv) local_unnamed_addr #0 personality ptr @__CxxFrameHandler3 { +define i32 @main(i32 %argc, ptr nocapture readnone %argv) local_unnamed_addr personality ptr @__CxxFrameHandler3 { %call = tail call i64 @fn(i64 0) %call1 = tail call i64 @fn(i64 1) %tobool = icmp eq i32 %argc, 0 @@ -62,9 +62,6 @@ endif: ret i32 0 } -declare i64 @fn(i64) local_unnamed_addr #1 +declare i64 @fn(i64) local_unnamed_addr declare i32 @__CxxFrameHandler3(...) 
- -attributes #0 = { norecurse "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "stack-protector-buffer-size"="8" "target-features"="+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #1 = { "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "stack-protector-buffer-size"="8" "target-features"="+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } diff --git a/llvm/test/Transforms/Coroutines/coro-debug.ll b/llvm/test/Transforms/Coroutines/coro-debug.ll index d1f1922..109be51f 100644 --- a/llvm/test/Transforms/Coroutines/coro-debug.ll +++ b/llvm/test/Transforms/Coroutines/coro-debug.ll @@ -14,7 +14,7 @@ entry: %0 = call token @llvm.coro.id(i32 0, ptr null, ptr @flink, ptr null), !dbg !16 %1 = call i64 @llvm.coro.size.i64(), !dbg !16 %call = call ptr @malloc(i64 %1), !dbg !16 - %2 = call ptr @llvm.coro.begin(token %0, ptr %call) #7, !dbg !16 + %2 = call ptr @llvm.coro.begin(token %0, ptr %call), !dbg !16 store ptr %2, ptr %coro_hdl, align 8, !dbg !16 %3 = call i8 @llvm.coro.suspend(token none, i1 false), !dbg !17 %conv = sext i8 %3 to i32, !dbg !17 @@ -69,7 +69,7 @@ coro_Cleanup: ; preds = %sw.epilog, %sw.bb1 br label %coro_Suspend, !dbg !24 coro_Suspend: ; preds = %coro_Cleanup, %sw.default - call void @llvm.coro.end(ptr null, i1 false, token none) #7, !dbg !24 + call void @llvm.coro.end(ptr null, i1 false, token none), !dbg !24 %7 = load ptr, ptr %coro_hdl, align 8, !dbg !24 store i32 0, ptr %late_local, !dbg !24 ret ptr %7, !dbg !24 @@ -82,47 +82,40 @@ ehcleanup: } ; Function Attrs: nounwind readnone speculatable -declare void @llvm.dbg.value(metadata, metadata, metadata) #1 +declare void @llvm.dbg.value(metadata, metadata, metadata) ; Function Attrs: nounwind readnone speculatable -declare void @llvm.dbg.declare(metadata, metadata, metadata) #1 +declare void @llvm.dbg.declare(metadata, metadata, metadata) ; Function Attrs: argmemonly nounwind readonly -declare token @llvm.coro.id(i32, ptr readnone, ptr nocapture readonly, ptr) #2 +declare token @llvm.coro.id(i32, ptr readnone, ptr nocapture readonly, ptr) -declare ptr @malloc(i64) #3 +declare ptr @malloc(i64) declare ptr @allocate() declare void @print({ ptr, i32 }) declare void @log() ; Function Attrs: nounwind readnone -declare i64 @llvm.coro.size.i64() #4 +declare i64 @llvm.coro.size.i64() ; Function Attrs: nounwind -declare ptr @llvm.coro.begin(token, ptr writeonly) #5 +declare ptr @llvm.coro.begin(token, ptr writeonly) ; Function Attrs: nounwind -declare i8 @llvm.coro.suspend(token, i1) #5 +declare i8 @llvm.coro.suspend(token, i1) -declare void @free(ptr) #3 +declare void @free(ptr) ; Function Attrs: argmemonly nounwind readonly -declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2 +declare ptr @llvm.coro.free(token, ptr nocapture readonly) ; Function Attrs: nounwind -declare void @llvm.coro.end(ptr, i1, token) #5 +declare void @llvm.coro.end(ptr, i1, token) ; Function Attrs: argmemonly nounwind readonly -declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8) #2 - -attributes #0 = { noinline nounwind presplitcoroutine "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" 
"no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #1 = { nounwind readnone speculatable } -attributes #2 = { argmemonly nounwind readonly } -attributes #3 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #4 = { nounwind readnone } -attributes #5 = { nounwind } -attributes #6 = { alwaysinline } -attributes #7 = { noduplicate } +declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8) + +attributes #0 = { noinline nounwind presplitcoroutine } !llvm.dbg.cu = !{!0} !llvm.module.flags = !{!3, !4} diff --git a/llvm/test/Transforms/Coroutines/coro-split-dbg.ll b/llvm/test/Transforms/Coroutines/coro-split-dbg.ll index c53bea8..577ca9a 100644 --- a/llvm/test/Transforms/Coroutines/coro-split-dbg.ll +++ b/llvm/test/Transforms/Coroutines/coro-split-dbg.ll @@ -6,9 +6,9 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-unknown-linux-gnu" ; Function Attrs: nounwind readnone -declare void @llvm.dbg.declare(metadata, metadata, metadata) #1 +declare void @llvm.dbg.declare(metadata, metadata, metadata) -declare void @bar(...) local_unnamed_addr #2 +declare void @bar(...) local_unnamed_addr ; Function Attrs: nounwind uwtable define ptr @f() #3 !dbg !16 { @@ -16,14 +16,14 @@ entry: %0 = tail call token @llvm.coro.id(i32 0, ptr null, ptr @f, ptr null), !dbg !26 %1 = tail call i64 @llvm.coro.size.i64(), !dbg !26 %call = tail call ptr @malloc(i64 %1), !dbg !26 - %2 = tail call ptr @llvm.coro.begin(token %0, ptr %call) #9, !dbg !26 + %2 = tail call ptr @llvm.coro.begin(token %0, ptr %call), !dbg !26 tail call void @llvm.dbg.value(metadata ptr %2, metadata !21, metadata !12), !dbg !26 br label %for.cond, !dbg !27 for.cond: ; preds = %for.cond, %entry tail call void @llvm.dbg.value(metadata i32 undef, metadata !22, metadata !12), !dbg !28 - tail call void @llvm.dbg.value(metadata i32 undef, metadata !11, metadata !12) #7, !dbg !29 - tail call void (...) @bar() #7, !dbg !33 + tail call void @llvm.dbg.value(metadata i32 undef, metadata !11, metadata !12), !dbg !29 + tail call void (...) 
@bar(), !dbg !33 %3 = tail call token @llvm.coro.save(ptr null), !dbg !34 %4 = tail call i8 @llvm.coro.suspend(token %3, i1 false), !dbg !34 %conv = sext i8 %4 to i32, !dbg !34 @@ -38,40 +38,31 @@ coro_Cleanup: ; preds = %for.cond br label %coro_Suspend, !dbg !36 coro_Suspend: ; preds = %for.cond, %if.then, %coro_Cleanup - tail call void @llvm.coro.end(ptr null, i1 false, token none) #9, !dbg !38 + tail call void @llvm.coro.end(ptr null, i1 false, token none), !dbg !38 ret ptr %2, !dbg !39 } ; Function Attrs: argmemonly nounwind -declare void @llvm.lifetime.start.p0(ptr nocapture) #4 +declare void @llvm.lifetime.start.p0(ptr nocapture) ; Function Attrs: argmemonly nounwind readonly -declare token @llvm.coro.id(i32, ptr readnone, ptr nocapture readonly, ptr) #5 +declare token @llvm.coro.id(i32, ptr readnone, ptr nocapture readonly, ptr) ; Function Attrs: nounwind -declare noalias ptr @malloc(i64) local_unnamed_addr #6 -declare i64 @llvm.coro.size.i64() #1 -declare ptr @llvm.coro.begin(token, ptr writeonly) #7 -declare token @llvm.coro.save(ptr) #7 -declare i8 @llvm.coro.suspend(token, i1) #7 -declare void @llvm.lifetime.end.p0(ptr nocapture) #4 -declare ptr @llvm.coro.free(token, ptr nocapture readonly) #5 -declare void @free(ptr nocapture) local_unnamed_addr #6 -declare void @llvm.coro.end(ptr, i1, token) #7 -declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8) #5 +declare noalias ptr @malloc(i64) local_unnamed_addr +declare i64 @llvm.coro.size.i64() +declare ptr @llvm.coro.begin(token, ptr writeonly) +declare token @llvm.coro.save(ptr) +declare i8 @llvm.coro.suspend(token, i1) +declare void @llvm.lifetime.end.p0(ptr nocapture) +declare ptr @llvm.coro.free(token, ptr nocapture readonly) +declare void @free(ptr nocapture) local_unnamed_addr +declare void @llvm.coro.end(ptr, i1, token) +declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8) -declare void @llvm.dbg.value(metadata, metadata, metadata) #1 +declare void @llvm.dbg.value(metadata, metadata, metadata) -attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #1 = { nounwind readnone } -attributes #2 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #3 = { nounwind uwtable presplitcoroutine "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #4 = { argmemonly nounwind } -attributes #5 = { argmemonly nounwind readonly } -attributes #6 = { nounwind 
"correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #7 = { nounwind } -attributes #8 = { alwaysinline nounwind } -attributes #9 = { noduplicate } +attributes #3 = { nounwind uwtable presplitcoroutine } !llvm.dbg.cu = !{!0} !llvm.module.flags = !{!3, !4} diff --git a/llvm/test/Transforms/Util/dbg-user-of-aext.ll b/llvm/test/Transforms/Util/dbg-user-of-aext.ll index 9e7935e..b3d1b90 100644 --- a/llvm/test/Transforms/Util/dbg-user-of-aext.ll +++ b/llvm/test/Transforms/Util/dbg-user-of-aext.ll @@ -27,7 +27,7 @@ %struct.foo = type { i8, i64 } ; Function Attrs: noinline nounwind uwtable -define void @_Z1fbb3foo(i1 zeroext %b, i1 zeroext %frag, i8 %g.coerce0, i64 %g.coerce1) #0 !dbg !6 { +define void @_Z1fbb3foo(i1 zeroext %b, i1 zeroext %frag, i8 %g.coerce0, i64 %g.coerce1) !dbg !6 { entry: %g = alloca %struct.foo, align 8 %b.addr = alloca i8, align 1 @@ -51,10 +51,7 @@ entry: ; CHECK: ![[VAR_FRAG]] = !DILocalVariable(name: "frag" ; Function Attrs: nounwind readnone speculatable -declare void @llvm.dbg.declare(metadata, metadata, metadata) #1 - -attributes #0 = { noinline nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #1 = { nounwind readnone speculatable } +declare void @llvm.dbg.declare(metadata, metadata, metadata) !llvm.dbg.cu = !{!0} !llvm.module.flags = !{!3, !4} diff --git a/llvm/test/Transforms/Util/libcalls-fast-math-inf-loop.ll b/llvm/test/Transforms/Util/libcalls-fast-math-inf-loop.ll index ad23bf7..e9f0c8c 100644 --- a/llvm/test/Transforms/Util/libcalls-fast-math-inf-loop.ll +++ b/llvm/test/Transforms/Util/libcalls-fast-math-inf-loop.ll @@ -19,18 +19,18 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-unknown-unknown" ; Function Attrs: nounwind -define float @fn(float %f) #0 { +define float @fn(float %f) { ; CHECK: define float @fn( ; CHECK: call fast float @expf( %f.addr = alloca float, align 4 store float %f, ptr %f.addr, align 4, !tbaa !1 %1 = load float, ptr %f.addr, align 4, !tbaa !1 - %call = call fast float @expf(float %1) #3 + %call = call fast float @expf(float %1) ret float %call } ; Function Attrs: inlinehint nounwind readnone -define available_externally float @expf(float %x) #1 { +define available_externally float @expf(float %x) { ; CHECK: define available_externally float @expf( ; CHECK: fpext float ; CHECK: call fast double @exp( @@ -39,17 +39,13 @@ define available_externally float @expf(float %x) #1 { store float %x, ptr %x.addr, align 4, !tbaa !1 %1 = load float, ptr %x.addr, align 4, !tbaa !1 %conv = fpext float %1 to double - %call = call fast double @exp(double %conv) #3 + %call = call fast double @exp(double %conv) %conv1 = fptrunc double %call to float ret float %conv1 } ; Function Attrs: nounwind readnone -declare double @exp(double) #2 - -attributes #0 = { nounwind 
"correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #1 = { inlinehint nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #2 = { nounwind readnone } +declare double @exp(double) !llvm.ident = !{!0} diff --git a/llvm/test/tools/llvm-ir2vec/embeddings-flowaware.ll b/llvm/test/tools/llvm-ir2vec/embeddings-flowaware.ll index b2362f8..ade228d 100644 --- a/llvm/test/tools/llvm-ir2vec/embeddings-flowaware.ll +++ b/llvm/test/tools/llvm-ir2vec/embeddings-flowaware.ll @@ -49,7 +49,7 @@ entry: ; CHECK-FUNC-LEVEL-ABC: Function: abc ; CHECK-FUNC-LEVEL-NEXT-ABC: [ 3630.00 3672.00 3714.00 ] -; CHECK-FUNC-DEF: Error: Function 'def' not found +; CHECK-FUNC-DEF: error: Function 'def' not found ; CHECK-BB-LEVEL: Function: abc ; CHECK-BB-LEVEL-NEXT: entry: [ 3630.00 3672.00 3714.00 ] diff --git a/llvm/test/tools/llvm-ir2vec/embeddings-symbolic.ll b/llvm/test/tools/llvm-ir2vec/embeddings-symbolic.ll index f9aa108..9d60e12 100644 --- a/llvm/test/tools/llvm-ir2vec/embeddings-symbolic.ll +++ b/llvm/test/tools/llvm-ir2vec/embeddings-symbolic.ll @@ -49,7 +49,7 @@ entry: ; CHECK-FUNC-LEVEL-ABC: Function: abc ; CHECK-FUNC-LEVEL-NEXT-ABC: [ 878.00 889.00 900.00 ] -; CHECK-FUNC-DEF: Error: Function 'def' not found +; CHECK-FUNC-DEF: error: Function 'def' not found ; CHECK-BB-LEVEL: Function: abc ; CHECK-BB-LEVEL-NEXT: entry: [ 878.00 889.00 900.00 ] diff --git a/llvm/test/tools/llvm-ir2vec/embeddings-symbolic.mir b/llvm/test/tools/llvm-ir2vec/embeddings-symbolic.mir index e5f78bf..ef835fe 100644 --- a/llvm/test/tools/llvm-ir2vec/embeddings-symbolic.mir +++ b/llvm/test/tools/llvm-ir2vec/embeddings-symbolic.mir @@ -67,7 +67,7 @@ body: | # CHECK-FUNC-LEVEL-ADD-NEXT: Function vector: [ 26.50 27.10 27.70 ] # CHECK-FUNC-LEVEL-ADD-NOT: simple_function -# CHECK-FUNC-MISSING: Error: Function 'missing_function' not found +# CHECK-FUNC-MISSING: error: Function 'missing_function' not found # CHECK-BB-LEVEL: MIR2Vec embeddings for machine function add_function: # CHECK-BB-LEVEL-NEXT: Basic block vectors: diff --git a/llvm/test/tools/llvm-ir2vec/error-handling.ll b/llvm/test/tools/llvm-ir2vec/error-handling.ll index b944ea0..8e9e455 100644 --- a/llvm/test/tools/llvm-ir2vec/error-handling.ll +++ b/llvm/test/tools/llvm-ir2vec/error-handling.ll @@ -10,4 +10,4 @@ entry: } ; CHECK-NO-VOCAB: error: IR2Vec vocabulary file path not specified; You may need to set it using --ir2vec-vocab-path -; CHECK-FUNC-NOT-FOUND: Error: Function 'nonexistent' not found +; CHECK-FUNC-NOT-FOUND: error: Function 'nonexistent' not found diff --git a/llvm/test/tools/llvm-ir2vec/error-handling.mir b/llvm/test/tools/llvm-ir2vec/error-handling.mir index 154078c..caec454c 100644 --- a/llvm/test/tools/llvm-ir2vec/error-handling.mir +++ b/llvm/test/tools/llvm-ir2vec/error-handling.mir @@ -31,11 +31,11 @@ body: | $eax = COPY %0 RET 0, $eax -# 
CHECK-NO-VOCAB: Error: Failed to load MIR2Vec vocabulary - MIR2Vec vocabulary file path not specified; set it using --mir2vec-vocab-path +# CHECK-NO-VOCAB: error: Failed to load MIR2Vec vocabulary - MIR2Vec vocabulary file path not specified; set it using --mir2vec-vocab-path -# CHECK-VOCAB-NOT-FOUND: Error: Failed to load MIR2Vec vocabulary +# CHECK-VOCAB-NOT-FOUND: error: Failed to load MIR2Vec vocabulary # CHECK-VOCAB-NOT-FOUND: No such file or directory -# CHECK-INVALID-VOCAB: Error: Failed to load MIR2Vec vocabulary - Missing 'Opcodes' section in vocabulary file +# CHECK-INVALID-VOCAB: error: Failed to load MIR2Vec vocabulary - Missing 'Opcodes' section in vocabulary file -# CHECK-FUNC-NOT-FOUND: Error: Function 'nonexistent_function' not found +# CHECK-FUNC-NOT-FOUND: error: Function 'nonexistent_function' not found