Diffstat (limited to 'clang/test')
42 files changed, 4710 insertions, 305 deletions
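The first group of new tests below covers WG14 N3457, which adopts the __COUNTER__ predefined macro for C2y. Its main practical use is pasting the current counter value onto a prefix to mint unique identifiers, which is exactly what n3457.c exercises with its CAT/NAME_WITH_COUNTER macros. A minimal standalone sketch of that idiom (a hypothetical file, not part of this patch; assumes a Clang providing __COUNTER__, which is an extension before C2y):

/* unique_name.c - sketch of the token-pasting idiom the n3457 tests cover. */
#define CAT_IMPL(a, b) a##b
#define CAT(a, b) CAT_IMPL(a, b)
/* Each expansion pastes the current __COUNTER__ value onto the prefix,
   and the counter increments on every mention. */
#define UNIQUE_NAME(prefix) CAT(prefix, __COUNTER__)

int main(void) {
  int UNIQUE_NAME(tmp) = 1; /* e.g. tmp0, if no earlier __COUNTER__ uses */
  int UNIQUE_NAME(tmp) = 2; /* a distinct name, e.g. tmp1, so no collision */
  return 0;
}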
diff --git a/clang/test/AST/ByteCode/cxx11.cpp b/clang/test/AST/ByteCode/cxx11.cpp index 427d3a1..e283a7b 100644 --- a/clang/test/AST/ByteCode/cxx11.cpp +++ b/clang/test/AST/ByteCode/cxx11.cpp @@ -374,7 +374,7 @@ namespace GH150709 {  namespace DiscardedAddrLabel {    void foo(void) {    L: -    *&&L; // both-error {{indirection not permitted}} \ +    *&&L; // both-error {{indirection not permitted on operand of type 'void *'}} \            // both-warning {{expression result unused}}    }  } diff --git a/clang/test/C/C2y/n3457.c b/clang/test/C/C2y/n3457.c new file mode 100644 index 0000000..d71a3f3 --- /dev/null +++ b/clang/test/C/C2y/n3457.c @@ -0,0 +1,38 @@ +// RUN: %clang_cc1 -verify=ext -std=c23 -pedantic %s +// RUN: %clang_cc1 -verify=ext -pedantic -x c++ %s +// RUN: %clang_cc1 -verify=pre -std=c2y -pedantic -Wpre-c2y-compat %s + +/* WG14 N3457: Clang 22 + * The __COUNTER__ predefined macro + * + * This predefined macro was supported as an extension in earlier versions of + * Clang, but the required diagnostics for the limits were not added until 22. + */ + +// Ensure that __COUNTER__ starts from 0. +static_assert(__COUNTER__ == 0); /* ext-warning {{'__COUNTER__' is a C2y extension}} +                                    pre-warning {{'__COUNTER__' is incompatible with standards before C2y}} +                                  */ + +// Ensure that the produced value can be used with token concatenation. +#define CAT_IMPL(a, b) a ## b +#define CAT(a, b) CAT_IMPL(a, b) +#define NAME_WITH_COUNTER(a) CAT(a, __COUNTER__) +void test() { +  // Because this is the 2nd expansion, this defines test1. +  int NAME_WITH_COUNTER(test); /* ext-warning {{'__COUNTER__' is a C2y extension}} +                                  pre-warning {{'__COUNTER__' is incompatible with standards before C2y}} +                                */ +  int other_test = test1;      // Ok +} + +// Ensure that __COUNTER__ increments each time you mention it. +static_assert(__COUNTER__ == 2); /* ext-warning {{'__COUNTER__' is a C2y extension}} +                                    pre-warning {{'__COUNTER__' is incompatible with standards before C2y}} +                                 */ +static_assert(__COUNTER__ == 3); /* ext-warning {{'__COUNTER__' is a C2y extension}} +                                    pre-warning {{'__COUNTER__' is incompatible with standards before C2y}} +                                 */ +static_assert(__COUNTER__ == 4); /* ext-warning {{'__COUNTER__' is a C2y extension}} +                                    pre-warning {{'__COUNTER__' is incompatible with standards before C2y}} +                                 */ diff --git a/clang/test/C/C2y/n3457_1.c b/clang/test/C/C2y/n3457_1.c new file mode 100644 index 0000000..76c5a0b --- /dev/null +++ b/clang/test/C/C2y/n3457_1.c @@ -0,0 +1,20 @@ +// RUN: %clang_cc1 -verify -std=c2y -finitial-counter-value=2147483646 %s + +// The value produced needs to be representable as a signed +// long. However, the actual type it expands to does *not* need to be forced to +// be signed long because that would generally mean suffixing the value with L, +// which would be very surprising for folks using this to generate unique ids. +// We'll test this by ensuring the largest value can be expanded properly and +// by asserting that signed long is always at least four bytes wide (which is +// what's required to represent that maximal value). 
+// +// So we set the initial counter value to 2147483646; we'll validate that, +// increment it once to get to the maximal value and ensure there's no +// diagnostic, then increment again to ensure we get the constraint violation. + +static_assert(__COUNTER__ == 2147483646); // Test and increment +static_assert(__COUNTER__ == 2147483647); // Test and increment + +// This one should fail. +signed long i = __COUNTER__; // expected-error {{'__COUNTER__' value cannot exceed 2'147'483'647}} + diff --git a/clang/test/C/C2y/n3457_2.c b/clang/test/C/C2y/n3457_2.c new file mode 100644 index 0000000..018c8f4 --- /dev/null +++ b/clang/test/C/C2y/n3457_2.c @@ -0,0 +1,10 @@ +// RUN: %clang_cc1 -verify=good -std=c2y -finitial-counter-value=2147483648 %s +// RUN: %clang_cc1 -verify -std=c2y -finitial-counter-value=2147483648 -DEXPAND_IT %s +// good-no-diagnostics + +// This sets the initial __COUNTER__ value to something that's too big. Setting +// the value too large is fine. Expanding to a too-large value is not. +#ifdef EXPAND_IT +  // This one should fail. +  signed long i = __COUNTER__; // expected-error {{'__COUNTER__' value cannot exceed 2'147'483'647}} +#endif diff --git a/clang/test/CIR/CodeGen/builtins-floating-point.c b/clang/test/CIR/CodeGen/builtins-floating-point.c index 193cc172..8bdc43c 100644 --- a/clang/test/CIR/CodeGen/builtins-floating-point.c +++ b/clang/test/CIR/CodeGen/builtins-floating-point.c @@ -7,14 +7,21 @@  float cosf(float f) {    return __builtin_cosf(f); -  // CHECK: %{{.*}} = cir.cos {{.*}} : !cir.float +  // CIR: %{{.*}} = cir.cos %{{.*}} : !cir.float    // LLVM: %{{.*}} = call float @llvm.cos.f32(float %{{.*}})    // OGCG: %{{.*}} = call float @llvm.cos.f32(float %{{.*}})  }  double cos(double f) {    return __builtin_cos(f); -  // CIR: {{.+}} = cir.cos {{.+}} : !cir.double +  // CIR: %{{.*}} = cir.cos %{{.*}} : !cir.double    // LLVM: %{{.*}} = call double @llvm.cos.f64(double %{{.*}})    // OGCG: %{{.*}} = call double @llvm.cos.f64(double %{{.*}})  } + +float ceil(float f) { +  return __builtin_ceilf(f); +  // CIR: %{{.*}} = cir.ceil %{{.*}} : !cir.float +  // LLVM: %{{.*}} = call float @llvm.ceil.f32(float %{{.*}}) +  // OGCG: %{{.*}} = call float @llvm.ceil.f32(float %{{.*}}) +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/non-policy/non-overloaded/sf_vfexp_v_16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/non-policy/non-overloaded/sf_vfexp_v_16.c new file mode 100644 index 0000000..a0d5845 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/non-policy/non-overloaded/sf_vfexp_v_16.c @@ -0,0 +1,131 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x -target-feature +zvfh \ +// RUN:   -target-feature +xsfvfexp16e -disable-O0-optnone  \ +// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN:   FileCheck --check-prefix=CHECK-RV64 %s + +#include <sifive_vector.h> + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_sf_vfexp_v_f16mf4( +// CHECK-RV64-SAME: <vscale x 1 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vfexp.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]] +// +vfloat16mf4_t test_sf_vfexp_v_f16mf4(vfloat16mf4_t vs2, size_t vl) { +  
return __riscv_sf_vfexp_v_f16mf4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_sf_vfexp_v_f16mf2( +// CHECK-RV64-SAME: <vscale x 2 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vfexp.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]] +// +vfloat16mf2_t test_sf_vfexp_v_f16mf2(vfloat16mf2_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_f16mf2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_sf_vfexp_v_f16m1( +// CHECK-RV64-SAME: <vscale x 4 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vfexp.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]] +// +vfloat16m1_t test_sf_vfexp_v_f16m1(vfloat16m1_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_f16m1(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_sf_vfexp_v_f16m2( +// CHECK-RV64-SAME: <vscale x 8 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vfexp.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]] +// +vfloat16m2_t test_sf_vfexp_v_f16m2(vfloat16m2_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_f16m2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_sf_vfexp_v_f16m4( +// CHECK-RV64-SAME: <vscale x 16 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vfexp.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]] +// +vfloat16m4_t test_sf_vfexp_v_f16m4(vfloat16m4_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_f16m4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_sf_vfexp_v_f16m8( +// CHECK-RV64-SAME: <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vfexp.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]] +// +vfloat16m8_t test_sf_vfexp_v_f16m8(vfloat16m8_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_f16m8(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_sf_vfexp_v_f16mf4_m( +// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vfexp.mask.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]] +// +vfloat16mf4_t test_sf_vfexp_v_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, +                                       size_t vl) { +  return __riscv_sf_vfexp_v_f16mf4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local <vscale x 2 x half> @test_sf_vfexp_v_f16mf2_m( +// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vfexp.mask.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]] +// +vfloat16mf2_t test_sf_vfexp_v_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, +                                       size_t vl) { +  return __riscv_sf_vfexp_v_f16mf2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_sf_vfexp_v_f16m1_m( +// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vfexp.mask.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]] +// +vfloat16m1_t test_sf_vfexp_v_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, +                                     size_t vl) { +  return __riscv_sf_vfexp_v_f16m1_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_sf_vfexp_v_f16m2_m( +// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vfexp.mask.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]] +// +vfloat16m2_t test_sf_vfexp_v_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_f16m2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_sf_vfexp_v_f16m4_m( +// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vfexp.mask.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]] +// +vfloat16m4_t test_sf_vfexp_v_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_f16m4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_sf_vfexp_v_f16m8_m( +// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vfexp.mask.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]] +// +vfloat16m8_t test_sf_vfexp_v_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_f16m8_m(vm, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/non-policy/non-overloaded/sf_vfexp_v_32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/non-policy/non-overloaded/sf_vfexp_v_32.c new file mode 100644 index 0000000..25d0991 --- /dev/null 
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/non-policy/non-overloaded/sf_vfexp_v_32.c @@ -0,0 +1,111 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x -target-feature +zve32f \ +// RUN:   -target-feature +xsfvfexp32e -disable-O0-optnone  \ +// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN:   FileCheck --check-prefix=CHECK-RV64 %s + +#include <sifive_vector.h> + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_sf_vfexp_v_f32mf2( +// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vfexp.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]] +// +vfloat32mf2_t test_sf_vfexp_v_f32mf2(vfloat32mf2_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_f32mf2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_sf_vfexp_v_f32m1( +// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vfexp.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]] +// +vfloat32m1_t test_sf_vfexp_v_f32m1(vfloat32m1_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_f32m1(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_sf_vfexp_v_f32m2( +// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vfexp.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]] +// +vfloat32m2_t test_sf_vfexp_v_f32m2(vfloat32m2_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_f32m2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_sf_vfexp_v_f32m4( +// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vfexp.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]] +// +vfloat32m4_t test_sf_vfexp_v_f32m4(vfloat32m4_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_f32m4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_sf_vfexp_v_f32m8( +// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vfexp.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]] +// +vfloat32m8_t test_sf_vfexp_v_f32m8(vfloat32m8_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_f32m8(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_sf_vfexp_v_f32mf2_m( +// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vfexp.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]] +// +vfloat32mf2_t test_sf_vfexp_v_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, +                                       size_t vl) { +  return __riscv_sf_vfexp_v_f32mf2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_sf_vfexp_v_f32m1_m( +// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vfexp.mask.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]] +// +vfloat32m1_t test_sf_vfexp_v_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, +                                     size_t vl) { +  return __riscv_sf_vfexp_v_f32m1_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_sf_vfexp_v_f32m2_m( +// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vfexp.mask.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]] +// +vfloat32m2_t test_sf_vfexp_v_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, +                                     size_t vl) { +  return __riscv_sf_vfexp_v_f32m2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_sf_vfexp_v_f32m4_m( +// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vfexp.mask.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]] +// +vfloat32m4_t test_sf_vfexp_v_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_f32m4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_sf_vfexp_v_f32m8_m( +// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vfexp.mask.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]] +// +vfloat32m8_t test_sf_vfexp_v_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_f32m8_m(vm, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/non-policy/non-overloaded/sf_vfexp_v_bf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/non-policy/non-overloaded/sf_vfexp_v_bf.c new file mode 100644 index 0000000..9fc332a --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/non-policy/non-overloaded/sf_vfexp_v_bf.c @@ 
-0,0 +1,135 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x -target-feature +zve32f \ +// RUN:   -target-feature +zvfbfmin -target-feature +xsfvfbfexp16e -disable-O0-optnone  \ +// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN:   FileCheck --check-prefix=CHECK-RV64 %s + +#include <sifive_vector.h> + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_sf_vfexp_v_bf16mf4( +// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.sf.vfexp.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]] +// +vbfloat16mf4_t test_sf_vfexp_v_bf16mf4(vbfloat16mf4_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_bf16mf4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_sf_vfexp_v_bf16mf2( +// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.sf.vfexp.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]] +// +vbfloat16mf2_t test_sf_vfexp_v_bf16mf2(vbfloat16mf2_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_bf16mf2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_sf_vfexp_v_bf16m1( +// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.sf.vfexp.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]] +// +vbfloat16m1_t test_sf_vfexp_v_bf16m1(vbfloat16m1_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_bf16m1(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_sf_vfexp_v_bf16m2( +// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.sf.vfexp.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]] +// +vbfloat16m2_t test_sf_vfexp_v_bf16m2(vbfloat16m2_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_bf16m2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_sf_vfexp_v_bf16m4( +// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.sf.vfexp.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]] +// +vbfloat16m4_t test_sf_vfexp_v_bf16m4(vbfloat16m4_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_bf16m4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_sf_vfexp_v_bf16m8( +// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.sf.vfexp.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]] +// +vbfloat16m8_t test_sf_vfexp_v_bf16m8(vbfloat16m8_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_bf16m8(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_sf_vfexp_v_bf16mf4_m( +// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]] +// +vbfloat16mf4_t test_sf_vfexp_v_bf16mf4_m(vbool64_t vm, vbfloat16mf4_t vs2, +                                         size_t vl) { +  return __riscv_sf_vfexp_v_bf16mf4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_sf_vfexp_v_bf16mf2_m( +// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]] +// +vbfloat16mf2_t test_sf_vfexp_v_bf16mf2_m(vbool32_t vm, vbfloat16mf2_t vs2, +                                         size_t vl) { +  return __riscv_sf_vfexp_v_bf16mf2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_sf_vfexp_v_bf16m1_m( +// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]] +// +vbfloat16m1_t test_sf_vfexp_v_bf16m1_m(vbool16_t vm, vbfloat16m1_t vs2, +                                       size_t vl) { +  return __riscv_sf_vfexp_v_bf16m1_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_sf_vfexp_v_bf16m2_m( +// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]] +// +vbfloat16m2_t test_sf_vfexp_v_bf16m2_m(vbool8_t vm, vbfloat16m2_t vs2, +                                       size_t vl) { +  return __riscv_sf_vfexp_v_bf16m2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_sf_vfexp_v_bf16m4_m( +// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> 
@llvm.riscv.sf.vfexp.mask.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]] +// +vbfloat16m4_t test_sf_vfexp_v_bf16m4_m(vbool4_t vm, vbfloat16m4_t vs2, +                                       size_t vl) { +  return __riscv_sf_vfexp_v_bf16m4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_sf_vfexp_v_bf16m8_m( +// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]] +// +vbfloat16m8_t test_sf_vfexp_v_bf16m8_m(vbool2_t vm, vbfloat16m8_t vs2, +                                       size_t vl) { +  return __riscv_sf_vfexp_v_bf16m8_m(vm, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/non-policy/non-overloaded/sf_vfexpa_v.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/non-policy/non-overloaded/sf_vfexpa_v.c new file mode 100644 index 0000000..67a9220 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/non-policy/non-overloaded/sf_vfexpa_v.c @@ -0,0 +1,234 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64f -target-feature +zvfh \ +// RUN:   -target-feature +xsfvfexpa -disable-O0-optnone  \ +// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN:   FileCheck --check-prefix=CHECK-RV64 %s + +#include <sifive_vector.h> + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_sf_vfexpa_v_f16mf4( +// CHECK-RV64-SAME: <vscale x 1 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vfexpa.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]] +// +vfloat16mf4_t test_sf_vfexpa_v_f16mf4(vfloat16mf4_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f16mf4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_sf_vfexpa_v_f16mf2( +// CHECK-RV64-SAME: <vscale x 2 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vfexpa.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]] +// +vfloat16mf2_t test_sf_vfexpa_v_f16mf2(vfloat16mf2_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f16mf2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_sf_vfexpa_v_f16m1( +// CHECK-RV64-SAME: <vscale x 4 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vfexpa.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]] +// +vfloat16m1_t test_sf_vfexpa_v_f16m1(vfloat16m1_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f16m1(vs2, vl); 
+} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_sf_vfexpa_v_f16m2( +// CHECK-RV64-SAME: <vscale x 8 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vfexpa.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]] +// +vfloat16m2_t test_sf_vfexpa_v_f16m2(vfloat16m2_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f16m2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_sf_vfexpa_v_f16m4( +// CHECK-RV64-SAME: <vscale x 16 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vfexpa.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]] +// +vfloat16m4_t test_sf_vfexpa_v_f16m4(vfloat16m4_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f16m4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_sf_vfexpa_v_f16m8( +// CHECK-RV64-SAME: <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vfexpa.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]] +// +vfloat16m8_t test_sf_vfexpa_v_f16m8(vfloat16m8_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f16m8(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_sf_vfexpa_v_f32mf2( +// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vfexpa.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]] +// +vfloat32mf2_t test_sf_vfexpa_v_f32mf2(vfloat32mf2_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f32mf2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_sf_vfexpa_v_f32m1( +// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vfexpa.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]] +// +vfloat32m1_t test_sf_vfexpa_v_f32m1(vfloat32m1_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f32m1(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_sf_vfexpa_v_f32m2( +// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vfexpa.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]] +// +vfloat32m2_t test_sf_vfexpa_v_f32m2(vfloat32m2_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f32m2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_sf_vfexpa_v_f32m4( +// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: 
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vfexpa.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]] +// +vfloat32m4_t test_sf_vfexpa_v_f32m4(vfloat32m4_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f32m4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_sf_vfexpa_v_f32m8( +// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vfexpa.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]] +// +vfloat32m8_t test_sf_vfexpa_v_f32m8(vfloat32m8_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f32m8(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_sf_vfexpa_v_f16mf4_m( +// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vfexpa.mask.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]] +// +vfloat16mf4_t test_sf_vfexpa_v_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, +                                        size_t vl) { +  return __riscv_sf_vfexpa_v_f16mf4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_sf_vfexpa_v_f16mf2_m( +// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vfexpa.mask.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]] +// +vfloat16mf2_t test_sf_vfexpa_v_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, +                                        size_t vl) { +  return __riscv_sf_vfexpa_v_f16mf2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_sf_vfexpa_v_f16m1_m( +// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vfexpa.mask.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]] +// +vfloat16m1_t test_sf_vfexpa_v_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, +                                      size_t vl) { +  return __riscv_sf_vfexpa_v_f16m1_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_sf_vfexpa_v_f16m2_m( +// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vfexpa.mask.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]] +// +vfloat16m2_t test_sf_vfexpa_v_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, size_t 
vl) { +  return __riscv_sf_vfexpa_v_f16m2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_sf_vfexpa_v_f16m4_m( +// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vfexpa.mask.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]] +// +vfloat16m4_t test_sf_vfexpa_v_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f16m4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_sf_vfexpa_v_f16m8_m( +// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vfexpa.mask.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]] +// +vfloat16m8_t test_sf_vfexpa_v_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f16m8_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_sf_vfexpa_v_f32mf2_m( +// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vfexpa.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]] +// +vfloat32mf2_t test_sf_vfexpa_v_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, +                                        size_t vl) { +  return __riscv_sf_vfexpa_v_f32mf2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_sf_vfexpa_v_f32m1_m( +// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vfexpa.mask.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]] +// +vfloat32m1_t test_sf_vfexpa_v_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, +                                      size_t vl) { +  return __riscv_sf_vfexpa_v_f32m1_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_sf_vfexpa_v_f32m2_m( +// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vfexpa.mask.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]] +// +vfloat32m2_t test_sf_vfexpa_v_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, +                                      size_t vl) { +  return __riscv_sf_vfexpa_v_f32m2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_sf_vfexpa_v_f32m4_m( +// CHECK-RV64-SAME: <vscale x 
8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vfexpa.mask.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]] +// +vfloat32m4_t test_sf_vfexpa_v_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f32m4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_sf_vfexpa_v_f32m8_m( +// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vfexpa.mask.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]] +// +vfloat32m8_t test_sf_vfexpa_v_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f32m8_m(vm, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/non-policy/non-overloaded/sf_vfexpa_v_64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/non-policy/non-overloaded/sf_vfexpa_v_64.c new file mode 100644 index 0000000..fd6f82d --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/non-policy/non-overloaded/sf_vfexpa_v_64.c @@ -0,0 +1,90 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +xsfvfexpa64e \ +// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN:   FileCheck --check-prefix=CHECK-RV64 %s + +#include <sifive_vector.h> + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_sf_vfexpa_v_f64m1( +// CHECK-RV64-SAME: <vscale x 1 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vfexpa.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]] +// +vfloat64m1_t test_sf_vfexpa_v_f64m1(vfloat64m1_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f64m1(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_sf_vfexpa_v_f64m2( +// CHECK-RV64-SAME: <vscale x 2 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vfexpa.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]] +// +vfloat64m2_t test_sf_vfexpa_v_f64m2(vfloat64m2_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f64m2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_sf_vfexpa_v_f64m4( +// CHECK-RV64-SAME: <vscale x 4 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vfexpa.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]] +// +vfloat64m4_t test_sf_vfexpa_v_f64m4(vfloat64m4_t vs2, 
size_t vl) { +  return __riscv_sf_vfexpa_v_f64m4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_sf_vfexpa_v_f64m8( +// CHECK-RV64-SAME: <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vfexpa.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]] +// +vfloat64m8_t test_sf_vfexpa_v_f64m8(vfloat64m8_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f64m8(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_sf_vfexpa_v_f64m1_m( +// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vfexpa.mask.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]] +// +vfloat64m1_t test_sf_vfexpa_v_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, +                                      size_t vl) { +  return __riscv_sf_vfexpa_v_f64m1_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_sf_vfexpa_v_f64m2_m( +// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vfexpa.mask.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]] +// +vfloat64m2_t test_sf_vfexpa_v_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, +                                      size_t vl) { +  return __riscv_sf_vfexpa_v_f64m2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_sf_vfexpa_v_f64m4_m( +// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vfexpa.mask.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]] +// +vfloat64m4_t test_sf_vfexpa_v_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, +                                      size_t vl) { +  return __riscv_sf_vfexpa_v_f64m4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_sf_vfexpa_v_f64m8_m( +// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vfexpa.mask.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]] +// +vfloat64m8_t test_sf_vfexpa_v_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f64m8_m(vm, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/non-policy/overloaded/sf_vfexp_v_16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/non-policy/overloaded/sf_vfexp_v_16.c new file 
mode 100644 index 0000000..0e769ed --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/non-policy/overloaded/sf_vfexp_v_16.c @@ -0,0 +1,131 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x -target-feature +zvfh \ +// RUN:   -target-feature +xsfvfexp16e -disable-O0-optnone  \ +// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN:   FileCheck --check-prefix=CHECK-RV64 %s + +#include <sifive_vector.h> + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_sf_vfexp_v_f16mf4( +// CHECK-RV64-SAME: <vscale x 1 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vfexp.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]] +// +vfloat16mf4_t test_sf_vfexp_v_f16mf4(vfloat16mf4_t vs2, size_t vl) { +  return __riscv_sf_vfexp(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_sf_vfexp_v_f16mf2( +// CHECK-RV64-SAME: <vscale x 2 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vfexp.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]] +// +vfloat16mf2_t test_sf_vfexp_v_f16mf2(vfloat16mf2_t vs2, size_t vl) { +  return __riscv_sf_vfexp(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_sf_vfexp_v_f16m1( +// CHECK-RV64-SAME: <vscale x 4 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vfexp.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]] +// +vfloat16m1_t test_sf_vfexp_v_f16m1(vfloat16m1_t vs2, size_t vl) { +  return __riscv_sf_vfexp(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_sf_vfexp_v_f16m2( +// CHECK-RV64-SAME: <vscale x 8 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vfexp.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]] +// +vfloat16m2_t test_sf_vfexp_v_f16m2(vfloat16m2_t vs2, size_t vl) { +  return __riscv_sf_vfexp(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_sf_vfexp_v_f16m4( +// CHECK-RV64-SAME: <vscale x 16 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vfexp.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]] +// +vfloat16m4_t test_sf_vfexp_v_f16m4(vfloat16m4_t vs2, size_t vl) { +  return __riscv_sf_vfexp(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_sf_vfexp_v_f16m8( +// CHECK-RV64-SAME: <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vfexp.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]] +// +vfloat16m8_t test_sf_vfexp_v_f16m8(vfloat16m8_t vs2, size_t vl) { +  return __riscv_sf_vfexp(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_sf_vfexp_v_f16mf4_m( +// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vfexp.mask.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]] +// +vfloat16mf4_t test_sf_vfexp_v_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, +                                       size_t vl) { +  return __riscv_sf_vfexp(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_sf_vfexp_v_f16mf2_m( +// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vfexp.mask.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]] +// +vfloat16mf2_t test_sf_vfexp_v_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, +                                       size_t vl) { +  return __riscv_sf_vfexp(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_sf_vfexp_v_f16m1_m( +// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vfexp.mask.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]] +// +vfloat16m1_t test_sf_vfexp_v_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, +                                     size_t vl) { +  return __riscv_sf_vfexp(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_sf_vfexp_v_f16m2_m( +// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vfexp.mask.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]] +// +vfloat16m2_t test_sf_vfexp_v_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) { +  return __riscv_sf_vfexp(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_sf_vfexp_v_f16m4_m( +// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vfexp.mask.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]] +// +vfloat16m4_t 
test_sf_vfexp_v_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_sf_vfexp(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_sf_vfexp_v_f16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vfexp.mask.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_sf_vfexp_v_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_sf_vfexp(vm, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/non-policy/overloaded/sf_vfexp_v_32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/non-policy/overloaded/sf_vfexp_v_32.c
new file mode 100644
index 0000000..3df1eaa
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/non-policy/overloaded/sf_vfexp_v_32.c
@@ -0,0 +1,111 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x -target-feature +zve32f \
+// RUN:   -target-feature +xsfvfexp32e -disable-O0-optnone  \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <sifive_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_sf_vfexp_v_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vfexp.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_sf_vfexp_v_f32mf2(vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_sf_vfexp(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_sf_vfexp_v_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vfexp.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_sf_vfexp_v_f32m1(vfloat32m1_t vs2, size_t vl) {
+  return __riscv_sf_vfexp(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_sf_vfexp_v_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vfexp.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_sf_vfexp_v_f32m2(vfloat32m2_t vs2, size_t vl) {
+  return __riscv_sf_vfexp(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_sf_vfexp_v_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vfexp.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_sf_vfexp_v_f32m4(vfloat32m4_t vs2, size_t vl) {
+  return __riscv_sf_vfexp(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_sf_vfexp_v_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vfexp.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_sf_vfexp_v_f32m8(vfloat32m8_t vs2, size_t vl) {
+  return __riscv_sf_vfexp(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_sf_vfexp_v_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vfexp.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_sf_vfexp_v_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+                                       size_t vl) {
+  return __riscv_sf_vfexp(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_sf_vfexp_v_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vfexp.mask.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_sf_vfexp_v_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+                                     size_t vl) {
+  return __riscv_sf_vfexp(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_sf_vfexp_v_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vfexp.mask.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_sf_vfexp_v_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+                                     size_t vl) {
+  return __riscv_sf_vfexp(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_sf_vfexp_v_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vfexp.mask.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_sf_vfexp_v_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_sf_vfexp(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_sf_vfexp_v_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vfexp.mask.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_sf_vfexp_v_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_sf_vfexp(vm, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/non-policy/overloaded/sf_vfexp_v_bf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/non-policy/overloaded/sf_vfexp_v_bf.c
new file mode 100644
index 0000000..6179dbe
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/non-policy/overloaded/sf_vfexp_v_bf.c
@@ -0,0 +1,134 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x -target-feature +zve32f \
+// RUN:   -target-feature +zvfbfmin -target-feature +xsfvfbfexp16e -disable-O0-optnone  \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <sifive_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_sf_vfexp_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.sf.vfexp.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_sf_vfexp_v_bf16mf4(vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_sf_vfexp(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_sf_vfexp_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.sf.vfexp.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_sf_vfexp_v_bf16mf2(vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_sf_vfexp(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_sf_vfexp_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.sf.vfexp.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_sf_vfexp_v_bf16m1(vbfloat16m1_t vs2, size_t vl) {
+  return __riscv_sf_vfexp(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_sf_vfexp_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.sf.vfexp.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_sf_vfexp_v_bf16m2(vbfloat16m2_t vs2, size_t vl) {
+  return __riscv_sf_vfexp(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_sf_vfexp_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.sf.vfexp.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_sf_vfexp_v_bf16m4(vbfloat16m4_t vs2, size_t vl) {
+  return __riscv_sf_vfexp(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_sf_vfexp_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.sf.vfexp.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_sf_vfexp_v_bf16m8(vbfloat16m8_t vs2, size_t vl) {
+  return __riscv_sf_vfexp(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_sf_vfexp_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_sf_vfexp_v_bf16mf4_m(vbool64_t vm, vbfloat16mf4_t vs2,
+                                         size_t vl) {
+  return __riscv_sf_vfexp(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_sf_vfexp_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_sf_vfexp_v_bf16mf2_m(vbool32_t vm, vbfloat16mf2_t vs2,
+                                         size_t vl) {
+  return __riscv_sf_vfexp(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_sf_vfexp_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_sf_vfexp_v_bf16m1_m(vbool16_t vm, vbfloat16m1_t vs2,
+                                       size_t vl) {
+  return __riscv_sf_vfexp(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_sf_vfexp_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_sf_vfexp_v_bf16m2_m(vbool8_t vm, vbfloat16m2_t vs2,
+                                       size_t vl) {
+  return __riscv_sf_vfexp(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_sf_vfexp_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_sf_vfexp_v_bf16m4_m(vbool4_t vm, vbfloat16m4_t vs2,
+                                       size_t vl) {
+  return __riscv_sf_vfexp(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_sf_vfexp_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_sf_vfexp_v_bf16m8_m(vbool2_t vm, vbfloat16m8_t vs2,
+                                       size_t vl) {
+  return __riscv_sf_vfexp(vm, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/non-policy/overloaded/sf_vfexpa_v.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/non-policy/overloaded/sf_vfexpa_v.c
new file mode 100644
index 0000000..1ddbb0b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/non-policy/overloaded/sf_vfexpa_v.c
@@ -0,0 +1,234 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64f -target-feature +zvfh \
+// RUN:   -target-feature +xsfvfexpa -disable-O0-optnone  \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <sifive_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_sf_vfexpa_v_f16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vfexpa.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_sf_vfexpa_v_f16mf4(vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_sf_vfexpa(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_sf_vfexpa_v_f16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vfexpa.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_sf_vfexpa_v_f16mf2(vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_sf_vfexpa(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_sf_vfexpa_v_f16m1(
+// CHECK-RV64-SAME: <vscale x 4 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vfexpa.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_sf_vfexpa_v_f16m1(vfloat16m1_t vs2, size_t vl) {
+  return __riscv_sf_vfexpa(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_sf_vfexpa_v_f16m2(
+// CHECK-RV64-SAME: <vscale x 8 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vfexpa.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_sf_vfexpa_v_f16m2(vfloat16m2_t vs2, size_t vl) {
+  return __riscv_sf_vfexpa(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_sf_vfexpa_v_f16m4(
+// CHECK-RV64-SAME: <vscale x 16 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vfexpa.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_sf_vfexpa_v_f16m4(vfloat16m4_t vs2, size_t vl) {
+  return __riscv_sf_vfexpa(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_sf_vfexpa_v_f16m8(
+// CHECK-RV64-SAME: <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vfexpa.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_sf_vfexpa_v_f16m8(vfloat16m8_t vs2, size_t vl) {
+  return __riscv_sf_vfexpa(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_sf_vfexpa_v_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vfexpa.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_sf_vfexpa_v_f32mf2(vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_sf_vfexpa(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_sf_vfexpa_v_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vfexpa.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_sf_vfexpa_v_f32m1(vfloat32m1_t vs2, size_t vl) {
+  return __riscv_sf_vfexpa(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_sf_vfexpa_v_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vfexpa.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_sf_vfexpa_v_f32m2(vfloat32m2_t vs2, size_t vl) {
+  return __riscv_sf_vfexpa(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_sf_vfexpa_v_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vfexpa.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_sf_vfexpa_v_f32m4(vfloat32m4_t vs2, size_t vl) {
+  return __riscv_sf_vfexpa(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_sf_vfexpa_v_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vfexpa.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_sf_vfexpa_v_f32m8(vfloat32m8_t vs2, size_t vl) {
+  return __riscv_sf_vfexpa(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_sf_vfexpa_v_f16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vfexpa.mask.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_sf_vfexpa_v_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2,
+                                        size_t vl) {
+  return __riscv_sf_vfexpa(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_sf_vfexpa_v_f16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vfexpa.mask.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_sf_vfexpa_v_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2,
+                                        size_t vl) {
+  return __riscv_sf_vfexpa(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_sf_vfexpa_v_f16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vfexpa.mask.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_sf_vfexpa_v_f16m1_m(vbool16_t vm, vfloat16m1_t vs2,
+                                      size_t vl) {
+  return __riscv_sf_vfexpa(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_sf_vfexpa_v_f16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vfexpa.mask.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_sf_vfexpa_v_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_sf_vfexpa(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_sf_vfexpa_v_f16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vfexpa.mask.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_sf_vfexpa_v_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_sf_vfexpa(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_sf_vfexpa_v_f16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vfexpa.mask.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_sf_vfexpa_v_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_sf_vfexpa(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_sf_vfexpa_v_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vfexpa.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_sf_vfexpa_v_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+                                        size_t vl) {
+  return __riscv_sf_vfexpa(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_sf_vfexpa_v_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vfexpa.mask.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_sf_vfexpa_v_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+                                      size_t vl) {
+  return __riscv_sf_vfexpa(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_sf_vfexpa_v_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vfexpa.mask.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_sf_vfexpa_v_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+                                      size_t vl) {
+  return __riscv_sf_vfexpa(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_sf_vfexpa_v_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vfexpa.mask.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_sf_vfexpa_v_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_sf_vfexpa(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_sf_vfexpa_v_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vfexpa.mask.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_sf_vfexpa_v_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_sf_vfexpa(vm, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/non-policy/overloaded/sf_vfexpa_v_64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/non-policy/overloaded/sf_vfexpa_v_64.c
new file mode 100644
index 0000000..165879a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/non-policy/overloaded/sf_vfexpa_v_64.c
@@ -0,0 +1,90 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +xsfvfexpa64e \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <sifive_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_sf_vfexpa_v_f64m1(
+// CHECK-RV64-SAME: <vscale x 1 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vfexpa.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_sf_vfexpa_v_f64m1(vfloat64m1_t vs2, size_t vl) {
+  return __riscv_sf_vfexpa(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_sf_vfexpa_v_f64m2(
+// CHECK-RV64-SAME: <vscale x 2 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vfexpa.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_sf_vfexpa_v_f64m2(vfloat64m2_t vs2, size_t vl) {
+  return __riscv_sf_vfexpa(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_sf_vfexpa_v_f64m4(
+// CHECK-RV64-SAME: <vscale x 4 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vfexpa.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_sf_vfexpa_v_f64m4(vfloat64m4_t vs2, size_t vl) {
+  return __riscv_sf_vfexpa(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_sf_vfexpa_v_f64m8(
+// CHECK-RV64-SAME: <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vfexpa.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_sf_vfexpa_v_f64m8(vfloat64m8_t vs2, size_t vl) {
+  return __riscv_sf_vfexpa(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_sf_vfexpa_v_f64m1_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vfexpa.mask.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_sf_vfexpa_v_f64m1_m(vbool64_t vm, vfloat64m1_t vs2,
+                                      size_t vl) {
+  return __riscv_sf_vfexpa(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_sf_vfexpa_v_f64m2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vfexpa.mask.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_sf_vfexpa_v_f64m2_m(vbool32_t vm, vfloat64m2_t vs2,
+                                      size_t vl) {
+  return __riscv_sf_vfexpa(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_sf_vfexpa_v_f64m4_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vfexpa.mask.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_sf_vfexpa_v_f64m4_m(vbool16_t vm, vfloat64m4_t vs2,
+                                      size_t vl) {
+  return __riscv_sf_vfexpa(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_sf_vfexpa_v_f64m8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vfexpa.mask.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_sf_vfexpa_v_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_sf_vfexpa(vm, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/policy/non-overloaded/sf_vfexp_v_16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/policy/non-overloaded/sf_vfexp_v_16.c
new file mode 100644
index 0000000..aed6d87
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/policy/non-overloaded/sf_vfexp_v_16.c
@@ -0,0 +1,248 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x -target-feature +zvfh \
+// RUN:   -target-feature +xsfvfexp16e -disable-O0-optnone  \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <sifive_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_sf_vfexp_v_f16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vfexp.nxv1f16.i64(<vscale x 1 x half> [[VD]], <vscale x 1 x half> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_sf_vfexp_v_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f16mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_sf_vfexp_v_f16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vfexp.nxv2f16.i64(<vscale x 2 x half> [[VD]], <vscale x 2 x half> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_sf_vfexp_v_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f16mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_sf_vfexp_v_f16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vfexp.nxv4f16.i64(<vscale x 4 x half> [[VD]], <vscale x 4 x half> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_sf_vfexp_v_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f16m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_sf_vfexp_v_f16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vfexp.nxv8f16.i64(<vscale x 8 x half> [[VD]], <vscale x 8 x half> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_sf_vfexp_v_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f16m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_sf_vfexp_v_f16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vfexp.nxv16f16.i64(<vscale x 16 x half> [[VD]], <vscale x 16 x half> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_sf_vfexp_v_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f16m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_sf_vfexp_v_f16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vfexp.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_sf_vfexp_v_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f16m8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_sf_vfexp_v_f16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vfexp.mask.nxv1f16.i64(<vscale x 1 x half> [[VD]], <vscale x 1 x half> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_sf_vfexp_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f16mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_sf_vfexp_v_f16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vfexp.mask.nxv2f16.i64(<vscale x 2 x half> [[VD]], <vscale x 2 x half> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_sf_vfexp_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f16mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_sf_vfexp_v_f16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vfexp.mask.nxv4f16.i64(<vscale x 4 x half> [[VD]], <vscale x 4 x half> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_sf_vfexp_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f16m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_sf_vfexp_v_f16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vfexp.mask.nxv8f16.i64(<vscale x 8 x half> [[VD]], <vscale x 8 x half> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_sf_vfexp_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f16m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_sf_vfexp_v_f16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vfexp.mask.nxv16f16.i64(<vscale x 16 x half> [[VD]], <vscale x 16 x half> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_sf_vfexp_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f16m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_sf_vfexp_v_f16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vfexp.mask.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_sf_vfexp_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f16m8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_sf_vfexp_v_f16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vfexp.mask.nxv1f16.i64(<vscale x 1 x half> [[VD]], <vscale x 1 x half> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_sf_vfexp_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f16mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_sf_vfexp_v_f16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vfexp.mask.nxv2f16.i64(<vscale x 2 x half> [[VD]], <vscale x 2 x half> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_sf_vfexp_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f16mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_sf_vfexp_v_f16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vfexp.mask.nxv4f16.i64(<vscale x 4 x half> [[VD]], <vscale x 4 x half> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_sf_vfexp_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f16m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_sf_vfexp_v_f16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vfexp.mask.nxv8f16.i64(<vscale x 8 x half> [[VD]], <vscale x 8 x half> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_sf_vfexp_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f16m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_sf_vfexp_v_f16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vfexp.mask.nxv16f16.i64(<vscale x 16 x half> [[VD]], <vscale x 16 x half> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_sf_vfexp_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f16m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_sf_vfexp_v_f16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vfexp.mask.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_sf_vfexp_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f16m8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_sf_vfexp_v_f16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vfexp.mask.nxv1f16.i64(<vscale x 1 x half> [[VD]], <vscale x 1 x half> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_sf_vfexp_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f16mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_sf_vfexp_v_f16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vfexp.mask.nxv2f16.i64(<vscale x 2 x half> [[VD]], <vscale x 2 x half> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_sf_vfexp_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f16mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_sf_vfexp_v_f16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vfexp.mask.nxv4f16.i64(<vscale x 4 x half> [[VD]], <vscale x 4 x half> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_sf_vfexp_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f16m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_sf_vfexp_v_f16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vfexp.mask.nxv8f16.i64(<vscale x 8 x half> [[VD]], <vscale x 8 x half> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_sf_vfexp_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f16m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_sf_vfexp_v_f16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vfexp.mask.nxv16f16.i64(<vscale x 16 x half> [[VD]], <vscale x 16 x half> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_sf_vfexp_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f16m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_sf_vfexp_v_f16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vfexp.mask.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_sf_vfexp_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) {
  return __riscv_sf_vfexp_v_f16m8_mu(vm, vd, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/policy/non-overloaded/sf_vfexp_v_32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/policy/non-overloaded/sf_vfexp_v_32.c
new file mode 100644
index 0000000..374f324
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/policy/non-overloaded/sf_vfexp_v_32.c
@@ -0,0 +1,208 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x -target-feature +zve32f \
+// RUN:   -target-feature +xsfvfexp32e -disable-O0-optnone  \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <sifive_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_sf_vfexp_v_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vfexp.nxv1f32.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_sf_vfexp_v_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f32mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_sf_vfexp_v_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vfexp.nxv2f32.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_sf_vfexp_v_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f32m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_sf_vfexp_v_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vfexp.nxv4f32.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_sf_vfexp_v_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f32m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_sf_vfexp_v_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vfexp.nxv8f32.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_sf_vfexp_v_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f32m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_sf_vfexp_v_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vfexp.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_sf_vfexp_v_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f32m8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_sf_vfexp_v_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vfexp.mask.nxv1f32.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_sf_vfexp_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f32mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_sf_vfexp_v_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vfexp.mask.nxv2f32.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_sf_vfexp_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f32m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_sf_vfexp_v_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vfexp.mask.nxv4f32.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_sf_vfexp_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f32m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_sf_vfexp_v_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vfexp.mask.nxv8f32.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_sf_vfexp_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f32m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_sf_vfexp_v_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vfexp.mask.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_sf_vfexp_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f32m8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_sf_vfexp_v_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vfexp.mask.nxv1f32.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_sf_vfexp_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f32mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_sf_vfexp_v_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vfexp.mask.nxv2f32.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_sf_vfexp_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f32m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_sf_vfexp_v_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vfexp.mask.nxv4f32.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_sf_vfexp_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f32m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_sf_vfexp_v_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vfexp.mask.nxv8f32.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_sf_vfexp_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f32m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_sf_vfexp_v_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vfexp.mask.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_sf_vfexp_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f32m8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_sf_vfexp_v_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vfexp.mask.nxv1f32.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_sf_vfexp_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f32mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_sf_vfexp_v_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vfexp.mask.nxv2f32.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_sf_vfexp_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f32m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_sf_vfexp_v_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vfexp.mask.nxv4f32.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_sf_vfexp_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f32m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_sf_vfexp_v_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vfexp.mask.nxv8f32.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_sf_vfexp_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f32m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_sf_vfexp_v_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vfexp.mask.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_sf_vfexp_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_f32m8_mu(vm, vd, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/policy/non-overloaded/sf_vfexp_v_bf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/policy/non-overloaded/sf_vfexp_v_bf.c
new file mode 100644
index 0000000..aec0b9f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/policy/non-overloaded/sf_vfexp_v_bf.c
@@ -0,0 +1,248 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x -target-feature +zve32f \
+// RUN:   -target-feature +zvfbfmin -target-feature +xsfvfbfexp16e -disable-O0-optnone  \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <sifive_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_sf_vfexp_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.sf.vfexp.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_sf_vfexp_v_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_bf16mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_sf_vfexp_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.sf.vfexp.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_sf_vfexp_v_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_bf16mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_sf_vfexp_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.sf.vfexp.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_sf_vfexp_v_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs2, size_t vl) {
+  return __riscv_sf_vfexp_v_bf16m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_sf_vfexp_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.sf.vfexp.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_sf_vfexp_v_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs2, size_t vl) {
+  
return __riscv_sf_vfexp_v_bf16m2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_sf_vfexp_v_bf16m4_tu( +// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.sf.vfexp.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]] +// +vbfloat16m4_t test_sf_vfexp_v_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_bf16m4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_sf_vfexp_v_bf16m8_tu( +// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.sf.vfexp.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]] +// +vbfloat16m8_t test_sf_vfexp_v_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_bf16m8_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_sf_vfexp_v_bf16mf4_tum( +// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]] +// +vbfloat16mf4_t test_sf_vfexp_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, vbfloat16mf4_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_bf16mf4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_sf_vfexp_v_bf16mf2_tum( +// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]] +// +vbfloat16mf2_t test_sf_vfexp_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, vbfloat16mf2_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_bf16mf2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_sf_vfexp_v_bf16m1_tum( +// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]] +// +vbfloat16m1_t test_sf_vfexp_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, vbfloat16m1_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_bf16m1_tum(vm, vd, vs2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_sf_vfexp_v_bf16m2_tum( +// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]] +// +vbfloat16m2_t test_sf_vfexp_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, vbfloat16m2_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_bf16m2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_sf_vfexp_v_bf16m4_tum( +// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]] +// +vbfloat16m4_t test_sf_vfexp_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, vbfloat16m4_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_bf16m4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_sf_vfexp_v_bf16m8_tum( +// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]] +// +vbfloat16m8_t test_sf_vfexp_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, vbfloat16m8_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_bf16m8_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_sf_vfexp_v_bf16mf4_tumu( +// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]] +// +vbfloat16mf4_t test_sf_vfexp_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, vbfloat16mf4_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_bf16mf4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_sf_vfexp_v_bf16mf2_tumu( +// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]] +// +vbfloat16mf2_t test_sf_vfexp_v_bf16mf2_tumu(vbool32_t vm, 
vbfloat16mf2_t vd, vbfloat16mf2_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_bf16mf2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_sf_vfexp_v_bf16m1_tumu( +// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]] +// +vbfloat16m1_t test_sf_vfexp_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, vbfloat16m1_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_bf16m1_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_sf_vfexp_v_bf16m2_tumu( +// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]] +// +vbfloat16m2_t test_sf_vfexp_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, vbfloat16m2_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_bf16m2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_sf_vfexp_v_bf16m4_tumu( +// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]] +// +vbfloat16m4_t test_sf_vfexp_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, vbfloat16m4_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_bf16m4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_sf_vfexp_v_bf16m8_tumu( +// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]] +// +vbfloat16m8_t test_sf_vfexp_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, vbfloat16m8_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_bf16m8_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_sf_vfexp_v_bf16mf4_mu( +// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1) +// 
CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]] +// +vbfloat16mf4_t test_sf_vfexp_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, vbfloat16mf4_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_bf16mf4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_sf_vfexp_v_bf16mf2_mu( +// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]] +// +vbfloat16mf2_t test_sf_vfexp_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, vbfloat16mf2_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_bf16mf2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_sf_vfexp_v_bf16m1_mu( +// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]] +// +vbfloat16m1_t test_sf_vfexp_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, vbfloat16m1_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_bf16m1_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_sf_vfexp_v_bf16m2_mu( +// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]] +// +vbfloat16m2_t test_sf_vfexp_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, vbfloat16m2_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_bf16m2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_sf_vfexp_v_bf16m4_mu( +// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]] +// +vbfloat16m4_t test_sf_vfexp_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, vbfloat16m4_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_bf16m4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_sf_vfexp_v_bf16m8_mu( +// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv32bf16.i64(<vscale x 32 x bfloat> 
[[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]] +// +vbfloat16m8_t test_sf_vfexp_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, vbfloat16m8_t vs2, size_t vl) { +  return __riscv_sf_vfexp_v_bf16m8_mu(vm, vd, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/policy/non-overloaded/sf_vfexpa_v.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/policy/non-overloaded/sf_vfexpa_v.c new file mode 100644 index 0000000..b687026 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/policy/non-overloaded/sf_vfexpa_v.c @@ -0,0 +1,448 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64f -target-feature +zvfh \ +// RUN:   -target-feature +xsfvfexpa -disable-O0-optnone  \ +// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN:   FileCheck --check-prefix=CHECK-RV64 %s + +#include <sifive_vector.h> + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_sf_vfexpa_v_f16mf4_tu( +// CHECK-RV64-SAME: <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vfexpa.nxv1f16.i64(<vscale x 1 x half> [[VD]], <vscale x 1 x half> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]] +// +vfloat16mf4_t test_sf_vfexpa_v_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f16mf4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_sf_vfexpa_v_f16mf2_tu( +// CHECK-RV64-SAME: <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vfexpa.nxv2f16.i64(<vscale x 2 x half> [[VD]], <vscale x 2 x half> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]] +// +vfloat16mf2_t test_sf_vfexpa_v_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f16mf2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_sf_vfexpa_v_f16m1_tu( +// CHECK-RV64-SAME: <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vfexpa.nxv4f16.i64(<vscale x 4 x half> [[VD]], <vscale x 4 x half> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]] +// +vfloat16m1_t test_sf_vfexpa_v_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f16m1_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_sf_vfexpa_v_f16m2_tu( +// CHECK-RV64-SAME: <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vfexpa.nxv8f16.i64(<vscale x 8 x half> [[VD]], <vscale x 8 x half> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]] +// +vfloat16m2_t test_sf_vfexpa_v_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f16m2_tu(vd, vs2, vl); +} 
+ +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_sf_vfexpa_v_f16m4_tu( +// CHECK-RV64-SAME: <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vfexpa.nxv16f16.i64(<vscale x 16 x half> [[VD]], <vscale x 16 x half> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]] +// +vfloat16m4_t test_sf_vfexpa_v_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f16m4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_sf_vfexpa_v_f16m8_tu( +// CHECK-RV64-SAME: <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vfexpa.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]] +// +vfloat16m8_t test_sf_vfexpa_v_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f16m8_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_sf_vfexpa_v_f32mf2_tu( +// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vfexpa.nxv1f32.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]] +// +vfloat32mf2_t test_sf_vfexpa_v_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f32mf2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_sf_vfexpa_v_f32m1_tu( +// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vfexpa.nxv2f32.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]] +// +vfloat32m1_t test_sf_vfexpa_v_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f32m1_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_sf_vfexpa_v_f32m2_tu( +// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vfexpa.nxv4f32.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]] +// +vfloat32m2_t test_sf_vfexpa_v_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f32m2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_sf_vfexpa_v_f32m4_tu( +// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vfexpa.nxv8f32.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret 
<vscale x 8 x float> [[TMP0]] +// +vfloat32m4_t test_sf_vfexpa_v_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f32m4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_sf_vfexpa_v_f32m8_tu( +// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vfexpa.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]] +// +vfloat32m8_t test_sf_vfexpa_v_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f32m8_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_sf_vfexpa_v_f16mf4_tum( +// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vfexpa.mask.nxv1f16.i64(<vscale x 1 x half> [[VD]], <vscale x 1 x half> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]] +// +vfloat16mf4_t test_sf_vfexpa_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f16mf4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_sf_vfexpa_v_f16mf2_tum( +// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vfexpa.mask.nxv2f16.i64(<vscale x 2 x half> [[VD]], <vscale x 2 x half> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]] +// +vfloat16mf2_t test_sf_vfexpa_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f16mf2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_sf_vfexpa_v_f16m1_tum( +// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vfexpa.mask.nxv4f16.i64(<vscale x 4 x half> [[VD]], <vscale x 4 x half> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]] +// +vfloat16m1_t test_sf_vfexpa_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f16m1_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_sf_vfexpa_v_f16m2_tum( +// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vfexpa.mask.nxv8f16.i64(<vscale x 8 x half> [[VD]], <vscale x 8 x half> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]] +// +vfloat16m2_t test_sf_vfexpa_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, 
size_t vl) { +  return __riscv_sf_vfexpa_v_f16m2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_sf_vfexpa_v_f16m4_tum( +// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vfexpa.mask.nxv16f16.i64(<vscale x 16 x half> [[VD]], <vscale x 16 x half> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]] +// +vfloat16m4_t test_sf_vfexpa_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f16m4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_sf_vfexpa_v_f16m8_tum( +// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vfexpa.mask.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]] +// +vfloat16m8_t test_sf_vfexpa_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f16m8_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_sf_vfexpa_v_f32mf2_tum( +// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vfexpa.mask.nxv1f32.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]] +// +vfloat32mf2_t test_sf_vfexpa_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f32mf2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_sf_vfexpa_v_f32m1_tum( +// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vfexpa.mask.nxv2f32.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]] +// +vfloat32m1_t test_sf_vfexpa_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f32m1_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_sf_vfexpa_v_f32m2_tum( +// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vfexpa.mask.nxv4f32.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]] +// +vfloat32m2_t test_sf_vfexpa_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, 
size_t vl) { +  return __riscv_sf_vfexpa_v_f32m2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_sf_vfexpa_v_f32m4_tum( +// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vfexpa.mask.nxv8f32.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]] +// +vfloat32m4_t test_sf_vfexpa_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f32m4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_sf_vfexpa_v_f32m8_tum( +// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vfexpa.mask.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]] +// +vfloat32m8_t test_sf_vfexpa_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f32m8_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_sf_vfexpa_v_f16mf4_tumu( +// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vfexpa.mask.nxv1f16.i64(<vscale x 1 x half> [[VD]], <vscale x 1 x half> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]] +// +vfloat16mf4_t test_sf_vfexpa_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f16mf4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_sf_vfexpa_v_f16mf2_tumu( +// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vfexpa.mask.nxv2f16.i64(<vscale x 2 x half> [[VD]], <vscale x 2 x half> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]] +// +vfloat16mf2_t test_sf_vfexpa_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f16mf2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_sf_vfexpa_v_f16m1_tumu( +// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vfexpa.mask.nxv4f16.i64(<vscale x 4 x half> [[VD]], <vscale x 4 x half> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]] +// +vfloat16m1_t test_sf_vfexpa_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, 
size_t vl) { +  return __riscv_sf_vfexpa_v_f16m1_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_sf_vfexpa_v_f16m2_tumu( +// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vfexpa.mask.nxv8f16.i64(<vscale x 8 x half> [[VD]], <vscale x 8 x half> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]] +// +vfloat16m2_t test_sf_vfexpa_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f16m2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_sf_vfexpa_v_f16m4_tumu( +// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vfexpa.mask.nxv16f16.i64(<vscale x 16 x half> [[VD]], <vscale x 16 x half> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]] +// +vfloat16m4_t test_sf_vfexpa_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f16m4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_sf_vfexpa_v_f16m8_tumu( +// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vfexpa.mask.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]] +// +vfloat16m8_t test_sf_vfexpa_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f16m8_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_sf_vfexpa_v_f32mf2_tumu( +// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vfexpa.mask.nxv1f32.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]] +// +vfloat32mf2_t test_sf_vfexpa_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f32mf2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_sf_vfexpa_v_f32m1_tumu( +// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vfexpa.mask.nxv2f32.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]] +// +vfloat32m1_t test_sf_vfexpa_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, 
vfloat32m1_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f32m1_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_sf_vfexpa_v_f32m2_tumu( +// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vfexpa.mask.nxv4f32.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]] +// +vfloat32m2_t test_sf_vfexpa_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f32m2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_sf_vfexpa_v_f32m4_tumu( +// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vfexpa.mask.nxv8f32.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]] +// +vfloat32m4_t test_sf_vfexpa_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f32m4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_sf_vfexpa_v_f32m8_tumu( +// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vfexpa.mask.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]] +// +vfloat32m8_t test_sf_vfexpa_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f32m8_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_sf_vfexpa_v_f16mf4_mu( +// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vfexpa.mask.nxv1f16.i64(<vscale x 1 x half> [[VD]], <vscale x 1 x half> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]] +// +vfloat16mf4_t test_sf_vfexpa_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f16mf4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_sf_vfexpa_v_f16mf2_mu( +// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vfexpa.mask.nxv2f16.i64(<vscale x 2 x half> [[VD]], <vscale x 2 x half> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]] +// +vfloat16mf2_t test_sf_vfexpa_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, 
vfloat16mf2_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f16mf2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_sf_vfexpa_v_f16m1_mu( +// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vfexpa.mask.nxv4f16.i64(<vscale x 4 x half> [[VD]], <vscale x 4 x half> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]] +// +vfloat16m1_t test_sf_vfexpa_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f16m1_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_sf_vfexpa_v_f16m2_mu( +// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vfexpa.mask.nxv8f16.i64(<vscale x 8 x half> [[VD]], <vscale x 8 x half> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]] +// +vfloat16m2_t test_sf_vfexpa_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f16m2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_sf_vfexpa_v_f16m4_mu( +// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vfexpa.mask.nxv16f16.i64(<vscale x 16 x half> [[VD]], <vscale x 16 x half> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]] +// +vfloat16m4_t test_sf_vfexpa_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f16m4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_sf_vfexpa_v_f16m8_mu( +// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vfexpa.mask.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]] +// +vfloat16m8_t test_sf_vfexpa_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f16m8_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_sf_vfexpa_v_f32mf2_mu( +// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vfexpa.mask.nxv1f32.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]] +// +vfloat32mf2_t test_sf_vfexpa_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) 
{ +  return __riscv_sf_vfexpa_v_f32mf2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_sf_vfexpa_v_f32m1_mu( +// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vfexpa.mask.nxv2f32.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]] +// +vfloat32m1_t test_sf_vfexpa_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f32m1_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_sf_vfexpa_v_f32m2_mu( +// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vfexpa.mask.nxv4f32.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]] +// +vfloat32m2_t test_sf_vfexpa_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f32m2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_sf_vfexpa_v_f32m4_mu( +// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vfexpa.mask.nxv8f32.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]] +// +vfloat32m4_t test_sf_vfexpa_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f32m4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_sf_vfexpa_v_f32m8_mu( +// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vfexpa.mask.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]] +// +vfloat32m8_t test_sf_vfexpa_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f32m8_mu(vm, vd, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/policy/non-overloaded/sf_vfexpa_v_64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/policy/non-overloaded/sf_vfexpa_v_64.c new file mode 100644 index 0000000..8638dc2 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/policy/non-overloaded/sf_vfexpa_v_64.c @@ -0,0 +1,167 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +xsfvfexpa64e \ +// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN:   
FileCheck --check-prefix=CHECK-RV64 %s + +#include <sifive_vector.h> + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_sf_vfexpa_v_f64m1_tu( +// CHECK-RV64-SAME: <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vfexpa.nxv1f64.i64(<vscale x 1 x double> [[VD]], <vscale x 1 x double> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]] +// +vfloat64m1_t test_sf_vfexpa_v_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f64m1_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_sf_vfexpa_v_f64m2_tu( +// CHECK-RV64-SAME: <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vfexpa.nxv2f64.i64(<vscale x 2 x double> [[VD]], <vscale x 2 x double> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]] +// +vfloat64m2_t test_sf_vfexpa_v_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f64m2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_sf_vfexpa_v_f64m4_tu( +// CHECK-RV64-SAME: <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vfexpa.nxv4f64.i64(<vscale x 4 x double> [[VD]], <vscale x 4 x double> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]] +// +vfloat64m4_t test_sf_vfexpa_v_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f64m4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_sf_vfexpa_v_f64m8_tu( +// CHECK-RV64-SAME: <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vfexpa.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]] +// +vfloat64m8_t test_sf_vfexpa_v_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f64m8_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_sf_vfexpa_v_f64m1_tum( +// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vfexpa.mask.nxv1f64.i64(<vscale x 1 x double> [[VD]], <vscale x 1 x double> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]] +// +vfloat64m1_t test_sf_vfexpa_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f64m1_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_sf_vfexpa_v_f64m2_tum( +// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vfexpa.mask.nxv2f64.i64(<vscale x 2 x double> [[VD]], <vscale x 2 x double> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]] +// +vfloat64m2_t test_sf_vfexpa_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f64m2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_sf_vfexpa_v_f64m4_tum( +// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vfexpa.mask.nxv4f64.i64(<vscale x 4 x double> [[VD]], <vscale x 4 x double> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]] +// +vfloat64m4_t test_sf_vfexpa_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f64m4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_sf_vfexpa_v_f64m8_tum( +// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vfexpa.mask.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]] +// +vfloat64m8_t test_sf_vfexpa_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f64m8_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_sf_vfexpa_v_f64m1_tumu( +// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vfexpa.mask.nxv1f64.i64(<vscale x 1 x double> [[VD]], <vscale x 1 x double> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]] +// +vfloat64m1_t test_sf_vfexpa_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f64m1_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_sf_vfexpa_v_f64m2_tumu( +// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vfexpa.mask.nxv2f64.i64(<vscale x 2 x double> [[VD]], <vscale x 2 x double> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]] +// +vfloat64m2_t test_sf_vfexpa_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f64m2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_sf_vfexpa_v_f64m4_tumu( +// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vfexpa.mask.nxv4f64.i64(<vscale x 4 x double> [[VD]], <vscale x 4 x double> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]] +// +vfloat64m4_t test_sf_vfexpa_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f64m4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_sf_vfexpa_v_f64m8_tumu( +// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vfexpa.mask.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]] +// +vfloat64m8_t test_sf_vfexpa_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f64m8_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_sf_vfexpa_v_f64m1_mu( +// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vfexpa.mask.nxv1f64.i64(<vscale x 1 x double> [[VD]], <vscale x 1 x double> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]] +// +vfloat64m1_t test_sf_vfexpa_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f64m1_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_sf_vfexpa_v_f64m2_mu( +// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vfexpa.mask.nxv2f64.i64(<vscale x 2 x double> [[VD]], <vscale x 2 x double> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]] +// +vfloat64m2_t test_sf_vfexpa_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f64m2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_sf_vfexpa_v_f64m4_mu( +// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vfexpa.mask.nxv4f64.i64(<vscale x 4 x double> [[VD]], <vscale x 4 x double> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]] +// +vfloat64m4_t test_sf_vfexpa_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f64m4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_sf_vfexpa_v_f64m8_mu( +// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x 
double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vfexpa.mask.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]] +// +vfloat64m8_t test_sf_vfexpa_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_v_f64m8_mu(vm, vd, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/policy/overloaded/sf_vfexp_v_16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/policy/overloaded/sf_vfexp_v_16.c new file mode 100644 index 0000000..4ceeb7b --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/policy/overloaded/sf_vfexp_v_16.c @@ -0,0 +1,261 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x -target-feature +zvfh \ +// RUN:   -target-feature +xsfvfexp16e -disable-O0-optnone  \ +// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN:   FileCheck --check-prefix=CHECK-RV64 %s + +#include <sifive_vector.h> + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_sf_vfexp_v_f16mf2_tu( +// CHECK-RV64-SAME: <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vfexp.nxv2f16.i64(<vscale x 2 x half> [[VD]], <vscale x 2 x half> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]] +// +vfloat16mf2_t test_sf_vfexp_v_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, +                                        size_t vl) { +  return __riscv_sf_vfexp_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_sf_vfexp_v_f16m1_tu( +// CHECK-RV64-SAME: <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vfexp.nxv4f16.i64(<vscale x 4 x half> [[VD]], <vscale x 4 x half> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]] +// +vfloat16m1_t test_sf_vfexp_v_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, +                                      size_t vl) { +  return __riscv_sf_vfexp_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_sf_vfexp_v_f16m2_tu( +// CHECK-RV64-SAME: <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vfexp.nxv8f16.i64(<vscale x 8 x half> [[VD]], <vscale x 8 x half> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]] +// +vfloat16m2_t test_sf_vfexp_v_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, +                                      size_t vl) { +  return __riscv_sf_vfexp_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_sf_vfexp_v_f16m4_tu( +// CHECK-RV64-SAME: <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x 
half> @llvm.riscv.sf.vfexp.nxv16f16.i64(<vscale x 16 x half> [[VD]], <vscale x 16 x half> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]] +// +vfloat16m4_t test_sf_vfexp_v_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, +                                      size_t vl) { +  return __riscv_sf_vfexp_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_sf_vfexp_v_f16m8_tu( +// CHECK-RV64-SAME: <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vfexp.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]] +// +vfloat16m8_t test_sf_vfexp_v_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, +                                      size_t vl) { +  return __riscv_sf_vfexp_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_sf_vfexp_v_f16mf4_tum( +// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vfexp.mask.nxv1f16.i64(<vscale x 1 x half> [[VD]], <vscale x 1 x half> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]] +// +vfloat16mf4_t test_sf_vfexp_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, +                                         vfloat16mf4_t vs2, size_t vl) { +  return __riscv_sf_vfexp_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_sf_vfexp_v_f16mf2_tum( +// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vfexp.mask.nxv2f16.i64(<vscale x 2 x half> [[VD]], <vscale x 2 x half> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]] +// +vfloat16mf2_t test_sf_vfexp_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, +                                         vfloat16mf2_t vs2, size_t vl) { +  return __riscv_sf_vfexp_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_sf_vfexp_v_f16m1_tum( +// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vfexp.mask.nxv4f16.i64(<vscale x 4 x half> [[VD]], <vscale x 4 x half> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]] +// +vfloat16m1_t test_sf_vfexp_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, +                                       vfloat16m1_t vs2, size_t vl) { +  return __riscv_sf_vfexp_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_sf_vfexp_v_f16m2_tum( +// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call 
<vscale x 8 x half> @llvm.riscv.sf.vfexp.mask.nxv8f16.i64(<vscale x 8 x half> [[VD]], <vscale x 8 x half> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]] +// +vfloat16m2_t test_sf_vfexp_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, +                                       vfloat16m2_t vs2, size_t vl) { +  return __riscv_sf_vfexp_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_sf_vfexp_v_f16m4_tum( +// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vfexp.mask.nxv16f16.i64(<vscale x 16 x half> [[VD]], <vscale x 16 x half> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]] +// +vfloat16m4_t test_sf_vfexp_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, +                                       vfloat16m4_t vs2, size_t vl) { +  return __riscv_sf_vfexp_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_sf_vfexp_v_f16m8_tum( +// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vfexp.mask.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]] +// +vfloat16m8_t test_sf_vfexp_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, +                                       vfloat16m8_t vs2, size_t vl) { +  return __riscv_sf_vfexp_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_sf_vfexp_v_f16mf4_tumu( +// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vfexp.mask.nxv1f16.i64(<vscale x 1 x half> [[VD]], <vscale x 1 x half> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]] +// +vfloat16mf4_t test_sf_vfexp_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, +                                          vfloat16mf4_t vs2, size_t vl) { +  return __riscv_sf_vfexp_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_sf_vfexp_v_f16mf2_tumu( +// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vfexp.mask.nxv2f16.i64(<vscale x 2 x half> [[VD]], <vscale x 2 x half> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]] +// +vfloat16mf2_t test_sf_vfexp_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, +                                          vfloat16mf2_t vs2, size_t vl) { +  return __riscv_sf_vfexp_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_sf_vfexp_v_f16m1_tumu( +// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 
x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vfexp.mask.nxv4f16.i64(<vscale x 4 x half> [[VD]], <vscale x 4 x half> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]] +// +vfloat16m1_t test_sf_vfexp_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, +                                        vfloat16m1_t vs2, size_t vl) { +  return __riscv_sf_vfexp_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_sf_vfexp_v_f16m2_tumu( +// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vfexp.mask.nxv8f16.i64(<vscale x 8 x half> [[VD]], <vscale x 8 x half> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]] +// +vfloat16m2_t test_sf_vfexp_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, +                                        vfloat16m2_t vs2, size_t vl) { +  return __riscv_sf_vfexp_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_sf_vfexp_v_f16m4_tumu( +// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vfexp.mask.nxv16f16.i64(<vscale x 16 x half> [[VD]], <vscale x 16 x half> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]] +// +vfloat16m4_t test_sf_vfexp_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, +                                        vfloat16m4_t vs2, size_t vl) { +  return __riscv_sf_vfexp_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_sf_vfexp_v_f16m8_tumu( +// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vfexp.mask.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]] +// +vfloat16m8_t test_sf_vfexp_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, +                                        vfloat16m8_t vs2, size_t vl) { +  return __riscv_sf_vfexp_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_sf_vfexp_v_f16mf4_mu( +// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vfexp.mask.nxv1f16.i64(<vscale x 1 x half> [[VD]], <vscale x 1 x half> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]] +// +vfloat16mf4_t test_sf_vfexp_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, +                                        vfloat16mf4_t vs2, size_t vl) { +  return __riscv_sf_vfexp_mu(vm, vd, 
vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_sf_vfexp_v_f16mf2_mu( +// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vfexp.mask.nxv2f16.i64(<vscale x 2 x half> [[VD]], <vscale x 2 x half> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]] +// +vfloat16mf2_t test_sf_vfexp_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, +                                        vfloat16mf2_t vs2, size_t vl) { +  return __riscv_sf_vfexp_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_sf_vfexp_v_f16m1_mu( +// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vfexp.mask.nxv4f16.i64(<vscale x 4 x half> [[VD]], <vscale x 4 x half> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]] +// +vfloat16m1_t test_sf_vfexp_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, +                                      vfloat16m1_t vs2, size_t vl) { +  return __riscv_sf_vfexp_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_sf_vfexp_v_f16m2_mu( +// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vfexp.mask.nxv8f16.i64(<vscale x 8 x half> [[VD]], <vscale x 8 x half> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]] +// +vfloat16m2_t test_sf_vfexp_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, +                                      vfloat16m2_t vs2, size_t vl) { +  return __riscv_sf_vfexp_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_sf_vfexp_v_f16m4_mu( +// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vfexp.mask.nxv16f16.i64(<vscale x 16 x half> [[VD]], <vscale x 16 x half> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]] +// +vfloat16m4_t test_sf_vfexp_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, +                                      vfloat16m4_t vs2, size_t vl) { +  return __riscv_sf_vfexp_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_sf_vfexp_v_f16m8_mu( +// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vfexp.mask.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]] +// +vfloat16m8_t 
test_sf_vfexp_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, +                                      vfloat16m8_t vs2, size_t vl) { +  return __riscv_sf_vfexp_mu(vm, vd, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/policy/overloaded/sf_vfexp_v_32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/policy/overloaded/sf_vfexp_v_32.c new file mode 100644 index 0000000..e08d6c5b --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/policy/overloaded/sf_vfexp_v_32.c @@ -0,0 +1,228 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x -target-feature +zve32f \ +// RUN:   -target-feature +xsfvfexp32e -disable-O0-optnone  \ +// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN:   FileCheck --check-prefix=CHECK-RV64 %s + +#include <sifive_vector.h> + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_sf_vfexp_v_f32mf2_tu( +// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vfexp.nxv1f32.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]] +// +vfloat32mf2_t test_sf_vfexp_v_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, +                                        size_t vl) { +  return __riscv_sf_vfexp_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_sf_vfexp_v_f32m1_tu( +// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vfexp.nxv2f32.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]] +// +vfloat32m1_t test_sf_vfexp_v_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, +                                      size_t vl) { +  return __riscv_sf_vfexp_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_sf_vfexp_v_f32m2_tu( +// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vfexp.nxv4f32.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]] +// +vfloat32m2_t test_sf_vfexp_v_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, +                                      size_t vl) { +  return __riscv_sf_vfexp_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_sf_vfexp_v_f32m4_tu( +// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vfexp.nxv8f32.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]] +// +vfloat32m4_t test_sf_vfexp_v_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, +                                      size_t vl) { +  return 
__riscv_sf_vfexp_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_sf_vfexp_v_f32m8_tu( +// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vfexp.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]] +// +vfloat32m8_t test_sf_vfexp_v_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, +                                      size_t vl) { +  return __riscv_sf_vfexp_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_sf_vfexp_v_f32mf2_tum( +// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vfexp.mask.nxv1f32.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]] +// +vfloat32mf2_t test_sf_vfexp_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, +                                         vfloat32mf2_t vs2, size_t vl) { +  return __riscv_sf_vfexp_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_sf_vfexp_v_f32m1_tum( +// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vfexp.mask.nxv2f32.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]] +// +vfloat32m1_t test_sf_vfexp_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, +                                       vfloat32m1_t vs2, size_t vl) { +  return __riscv_sf_vfexp_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_sf_vfexp_v_f32m2_tum( +// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vfexp.mask.nxv4f32.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]] +// +vfloat32m2_t test_sf_vfexp_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, +                                       vfloat32m2_t vs2, size_t vl) { +  return __riscv_sf_vfexp_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_sf_vfexp_v_f32m4_tum( +// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vfexp.mask.nxv8f32.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]] +// +vfloat32m4_t test_sf_vfexp_v_f32m4_tum(vbool8_t vm, vfloat32m4_t 
vd, +                                       vfloat32m4_t vs2, size_t vl) { +  return __riscv_sf_vfexp_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_sf_vfexp_v_f32m8_tum( +// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vfexp.mask.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]] +// +vfloat32m8_t test_sf_vfexp_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, +                                       vfloat32m8_t vs2, size_t vl) { +  return __riscv_sf_vfexp_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_sf_vfexp_v_f32mf2_tumu( +// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vfexp.mask.nxv1f32.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]] +// +vfloat32mf2_t test_sf_vfexp_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, +                                          vfloat32mf2_t vs2, size_t vl) { +  return __riscv_sf_vfexp_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_sf_vfexp_v_f32m1_tumu( +// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vfexp.mask.nxv2f32.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]] +// +vfloat32m1_t test_sf_vfexp_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, +                                        vfloat32m1_t vs2, size_t vl) { +  return __riscv_sf_vfexp_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_sf_vfexp_v_f32m2_tumu( +// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vfexp.mask.nxv4f32.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]] +// +vfloat32m2_t test_sf_vfexp_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, +                                        vfloat32m2_t vs2, size_t vl) { +  return __riscv_sf_vfexp_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_sf_vfexp_v_f32m4_tumu( +// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vfexp.mask.nxv8f32.i64(<vscale x 8 x float> [[VD]], <vscale x 
8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]] +// +vfloat32m4_t test_sf_vfexp_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, +                                        vfloat32m4_t vs2, size_t vl) { +  return __riscv_sf_vfexp_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_sf_vfexp_v_f32m8_tumu( +// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vfexp.mask.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]] +// +vfloat32m8_t test_sf_vfexp_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, +                                        vfloat32m8_t vs2, size_t vl) { +  return __riscv_sf_vfexp_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_sf_vfexp_v_f32mf2_mu( +// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vfexp.mask.nxv1f32.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]] +// +vfloat32mf2_t test_sf_vfexp_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, +                                        vfloat32mf2_t vs2, size_t vl) { +  return __riscv_sf_vfexp_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_sf_vfexp_v_f32m1_mu( +// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vfexp.mask.nxv2f32.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]] +// +vfloat32m1_t test_sf_vfexp_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, +                                      vfloat32m1_t vs2, size_t vl) { +  return __riscv_sf_vfexp_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_sf_vfexp_v_f32m2_mu( +// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vfexp.mask.nxv4f32.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]] +// +vfloat32m2_t test_sf_vfexp_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, +                                      vfloat32m2_t vs2, size_t vl) { +  return __riscv_sf_vfexp_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_sf_vfexp_v_f32m4_mu( +// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vfexp.mask.nxv8f32.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]] +// +vfloat32m4_t test_sf_vfexp_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, +                                      vfloat32m4_t vs2, size_t vl) { +  return __riscv_sf_vfexp_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_sf_vfexp_v_f32m8_mu( +// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vfexp.mask.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]] +// +vfloat32m8_t test_sf_vfexp_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, +                                      vfloat32m8_t vs2, size_t vl) { +  return __riscv_sf_vfexp_mu(vm, vd, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/policy/overloaded/sf_vfexp_v_bf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/policy/overloaded/sf_vfexp_v_bf.c new file mode 100644 index 0000000..14570d4 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/policy/overloaded/sf_vfexp_v_bf.c @@ -0,0 +1,272 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x -target-feature +zve32f \ +// RUN:   -target-feature +zvfbfmin -target-feature +xsfvfbfexp16e -disable-O0-optnone  \ +// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN:   FileCheck --check-prefix=CHECK-RV64 %s + +#include <sifive_vector.h> + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_sf_vfexp_v_bf16mf4_tu( +// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.sf.vfexp.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]] +// +vbfloat16mf4_t test_sf_vfexp_v_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs2, +                                          size_t vl) { +  return __riscv_sf_vfexp_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_sf_vfexp_v_bf16mf2_tu( +// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.sf.vfexp.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]] +// +vbfloat16mf2_t test_sf_vfexp_v_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs2, +                                          size_t vl) { +  return __riscv_sf_vfexp_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_sf_vfexp_v_bf16m1_tu( +// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x 
bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.sf.vfexp.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]] +// +vbfloat16m1_t test_sf_vfexp_v_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs2, +                                        size_t vl) { +  return __riscv_sf_vfexp_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_sf_vfexp_v_bf16m2_tu( +// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.sf.vfexp.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]] +// +vbfloat16m2_t test_sf_vfexp_v_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs2, +                                        size_t vl) { +  return __riscv_sf_vfexp_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_sf_vfexp_v_bf16m4_tu( +// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.sf.vfexp.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]] +// +vbfloat16m4_t test_sf_vfexp_v_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs2, +                                        size_t vl) { +  return __riscv_sf_vfexp_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_sf_vfexp_v_bf16m8_tu( +// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.sf.vfexp.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]] +// +vbfloat16m8_t test_sf_vfexp_v_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs2, +                                        size_t vl) { +  return __riscv_sf_vfexp_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_sf_vfexp_v_bf16mf4_tum( +// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]] +// +vbfloat16mf4_t test_sf_vfexp_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, +                                           vbfloat16mf4_t vs2, size_t vl) { +  return __riscv_sf_vfexp_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_sf_vfexp_v_bf16mf2_tum( +// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]] +// +vbfloat16mf2_t test_sf_vfexp_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, +                                           vbfloat16mf2_t vs2, size_t vl) { +  return __riscv_sf_vfexp_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_sf_vfexp_v_bf16m1_tum( +// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]] +// +vbfloat16m1_t test_sf_vfexp_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, +                                         vbfloat16m1_t vs2, size_t vl) { +  return __riscv_sf_vfexp_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_sf_vfexp_v_bf16m2_tum( +// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]] +// +vbfloat16m2_t test_sf_vfexp_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, +                                         vbfloat16m2_t vs2, size_t vl) { +  return __riscv_sf_vfexp_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_sf_vfexp_v_bf16m4_tum( +// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]] +// +vbfloat16m4_t test_sf_vfexp_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, +                                         vbfloat16m4_t vs2, size_t vl) { +  return __riscv_sf_vfexp_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_sf_vfexp_v_bf16m8_tum( +// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]] +// +vbfloat16m8_t test_sf_vfexp_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, +                                         vbfloat16m8_t vs2, size_t vl) { +  return __riscv_sf_vfexp_tum(vm, vd, 
vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_sf_vfexp_v_bf16mf4_tumu( +// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]] +// +vbfloat16mf4_t test_sf_vfexp_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, +                                            vbfloat16mf4_t vs2, size_t vl) { +  return __riscv_sf_vfexp_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_sf_vfexp_v_bf16mf2_tumu( +// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]] +// +vbfloat16mf2_t test_sf_vfexp_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, +                                            vbfloat16mf2_t vs2, size_t vl) { +  return __riscv_sf_vfexp_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_sf_vfexp_v_bf16m1_tumu( +// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]] +// +vbfloat16m1_t test_sf_vfexp_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, +                                          vbfloat16m1_t vs2, size_t vl) { +  return __riscv_sf_vfexp_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_sf_vfexp_v_bf16m2_tumu( +// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]] +// +vbfloat16m2_t test_sf_vfexp_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, +                                          vbfloat16m2_t vs2, size_t vl) { +  return __riscv_sf_vfexp_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_sf_vfexp_v_bf16m4_tumu( +// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x 
i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]] +// +vbfloat16m4_t test_sf_vfexp_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, +                                          vbfloat16m4_t vs2, size_t vl) { +  return __riscv_sf_vfexp_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_sf_vfexp_v_bf16m8_tumu( +// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]] +// +vbfloat16m8_t test_sf_vfexp_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, +                                          vbfloat16m8_t vs2, size_t vl) { +  return __riscv_sf_vfexp_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_sf_vfexp_v_bf16mf4_mu( +// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]] +// +vbfloat16mf4_t test_sf_vfexp_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, +                                          vbfloat16mf4_t vs2, size_t vl) { +  return __riscv_sf_vfexp_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_sf_vfexp_v_bf16mf2_mu( +// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]] +// +vbfloat16mf2_t test_sf_vfexp_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, +                                          vbfloat16mf2_t vs2, size_t vl) { +  return __riscv_sf_vfexp_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_sf_vfexp_v_bf16m1_mu( +// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]] +// +vbfloat16m1_t test_sf_vfexp_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, +                                        vbfloat16m1_t vs2, size_t vl) { +  return __riscv_sf_vfexp_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_sf_vfexp_v_bf16m2_mu( +// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> 
[[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]] +// +vbfloat16m2_t test_sf_vfexp_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, +                                        vbfloat16m2_t vs2, size_t vl) { +  return __riscv_sf_vfexp_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_sf_vfexp_v_bf16m4_mu( +// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]] +// +vbfloat16m4_t test_sf_vfexp_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, +                                        vbfloat16m4_t vs2, size_t vl) { +  return __riscv_sf_vfexp_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_sf_vfexp_v_bf16m8_mu( +// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  [[ENTRY:.*:]] +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]] +// +vbfloat16m8_t test_sf_vfexp_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, +                                        vbfloat16m8_t vs2, size_t vl) { +  return __riscv_sf_vfexp_mu(vm, vd, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/policy/overloaded/sf_vfexpa_v.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/policy/overloaded/sf_vfexpa_v.c new file mode 100644 index 0000000..4ac5cfc --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/policy/overloaded/sf_vfexpa_v.c @@ -0,0 +1,492 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64f -target-feature +zvfh \ +// RUN:   -target-feature +xsfvfexpa -disable-O0-optnone  \ +// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN:   FileCheck --check-prefix=CHECK-RV64 %s + +#include <sifive_vector.h> + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_sf_vfexpa_v_f16mf4_tu( +// CHECK-RV64-SAME: <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vfexpa.nxv1f16.i64(<vscale x 1 x half> [[VD]], <vscale x 1 x half> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]] +// +vfloat16mf4_t test_sf_vfexpa_v_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, +                                         size_t vl) { +  return __riscv_sf_vfexpa_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x 
half> @test_sf_vfexpa_v_f16mf2_tu( +// CHECK-RV64-SAME: <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vfexpa.nxv2f16.i64(<vscale x 2 x half> [[VD]], <vscale x 2 x half> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]] +// +vfloat16mf2_t test_sf_vfexpa_v_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, +                                         size_t vl) { +  return __riscv_sf_vfexpa_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_sf_vfexpa_v_f16m1_tu( +// CHECK-RV64-SAME: <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vfexpa.nxv4f16.i64(<vscale x 4 x half> [[VD]], <vscale x 4 x half> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]] +// +vfloat16m1_t test_sf_vfexpa_v_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, +                                       size_t vl) { +  return __riscv_sf_vfexpa_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_sf_vfexpa_v_f16m2_tu( +// CHECK-RV64-SAME: <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vfexpa.nxv8f16.i64(<vscale x 8 x half> [[VD]], <vscale x 8 x half> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]] +// +vfloat16m2_t test_sf_vfexpa_v_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, +                                       size_t vl) { +  return __riscv_sf_vfexpa_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_sf_vfexpa_v_f16m4_tu( +// CHECK-RV64-SAME: <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vfexpa.nxv16f16.i64(<vscale x 16 x half> [[VD]], <vscale x 16 x half> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]] +// +vfloat16m4_t test_sf_vfexpa_v_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, +                                       size_t vl) { +  return __riscv_sf_vfexpa_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_sf_vfexpa_v_f16m8_tu( +// CHECK-RV64-SAME: <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vfexpa.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]] +// +vfloat16m8_t test_sf_vfexpa_v_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, +                                       size_t vl) { +  return __riscv_sf_vfexpa_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_sf_vfexpa_v_f32mf2_tu( +// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vfexpa.nxv1f32.i64(<vscale x 1 x 
float> [[VD]], <vscale x 1 x float> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]] +// +vfloat32mf2_t test_sf_vfexpa_v_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, +                                         size_t vl) { +  return __riscv_sf_vfexpa_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_sf_vfexpa_v_f32m1_tu( +// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vfexpa.nxv2f32.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]] +// +vfloat32m1_t test_sf_vfexpa_v_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, +                                       size_t vl) { +  return __riscv_sf_vfexpa_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_sf_vfexpa_v_f32m2_tu( +// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vfexpa.nxv4f32.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]] +// +vfloat32m2_t test_sf_vfexpa_v_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, +                                       size_t vl) { +  return __riscv_sf_vfexpa_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_sf_vfexpa_v_f32m4_tu( +// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vfexpa.nxv8f32.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]] +// +vfloat32m4_t test_sf_vfexpa_v_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, +                                       size_t vl) { +  return __riscv_sf_vfexpa_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_sf_vfexpa_v_f32m8_tu( +// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vfexpa.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]] +// +vfloat32m8_t test_sf_vfexpa_v_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, +                                       size_t vl) { +  return __riscv_sf_vfexpa_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_sf_vfexpa_v_f16mf4_tum( +// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vfexpa.mask.nxv1f16.i64(<vscale x 1 x half> [[VD]], <vscale x 1 x half> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]] +// +vfloat16mf4_t test_sf_vfexpa_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, +                       
                   vfloat16mf4_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_sf_vfexpa_v_f16mf2_tum( +// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vfexpa.mask.nxv2f16.i64(<vscale x 2 x half> [[VD]], <vscale x 2 x half> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]] +// +vfloat16mf2_t test_sf_vfexpa_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, +                                          vfloat16mf2_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_sf_vfexpa_v_f16m1_tum( +// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vfexpa.mask.nxv4f16.i64(<vscale x 4 x half> [[VD]], <vscale x 4 x half> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]] +// +vfloat16m1_t test_sf_vfexpa_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, +                                        vfloat16m1_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_sf_vfexpa_v_f16m2_tum( +// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vfexpa.mask.nxv8f16.i64(<vscale x 8 x half> [[VD]], <vscale x 8 x half> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]] +// +vfloat16m2_t test_sf_vfexpa_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, +                                        vfloat16m2_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_sf_vfexpa_v_f16m4_tum( +// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vfexpa.mask.nxv16f16.i64(<vscale x 16 x half> [[VD]], <vscale x 16 x half> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]] +// +vfloat16m4_t test_sf_vfexpa_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, +                                        vfloat16m4_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_sf_vfexpa_v_f16m8_tum( +// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vfexpa.mask.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2) +// 
CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]] +// +vfloat16m8_t test_sf_vfexpa_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, +                                        vfloat16m8_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_sf_vfexpa_v_f32mf2_tum( +// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vfexpa.mask.nxv1f32.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]] +// +vfloat32mf2_t test_sf_vfexpa_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, +                                          vfloat32mf2_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_sf_vfexpa_v_f32m1_tum( +// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vfexpa.mask.nxv2f32.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]] +// +vfloat32m1_t test_sf_vfexpa_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, +                                        vfloat32m1_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_sf_vfexpa_v_f32m2_tum( +// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vfexpa.mask.nxv4f32.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]] +// +vfloat32m2_t test_sf_vfexpa_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, +                                        vfloat32m2_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_sf_vfexpa_v_f32m4_tum( +// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vfexpa.mask.nxv8f32.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]] +// +vfloat32m4_t test_sf_vfexpa_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, +                                        vfloat32m4_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_sf_vfexpa_v_f32m8_tum( +// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call 
<vscale x 16 x float> @llvm.riscv.sf.vfexpa.mask.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]] +// +vfloat32m8_t test_sf_vfexpa_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, +                                        vfloat32m8_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_sf_vfexpa_v_f16mf4_tumu( +// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vfexpa.mask.nxv1f16.i64(<vscale x 1 x half> [[VD]], <vscale x 1 x half> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]] +// +vfloat16mf4_t test_sf_vfexpa_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, +                                           vfloat16mf4_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_sf_vfexpa_v_f16mf2_tumu( +// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vfexpa.mask.nxv2f16.i64(<vscale x 2 x half> [[VD]], <vscale x 2 x half> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]] +// +vfloat16mf2_t test_sf_vfexpa_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, +                                           vfloat16mf2_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_sf_vfexpa_v_f16m1_tumu( +// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vfexpa.mask.nxv4f16.i64(<vscale x 4 x half> [[VD]], <vscale x 4 x half> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]] +// +vfloat16m1_t test_sf_vfexpa_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, +                                         vfloat16m1_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_sf_vfexpa_v_f16m2_tumu( +// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vfexpa.mask.nxv8f16.i64(<vscale x 8 x half> [[VD]], <vscale x 8 x half> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]] +// +vfloat16m2_t test_sf_vfexpa_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, +                                         vfloat16m2_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_sf_vfexpa_v_f16m4_tumu( +// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 
x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vfexpa.mask.nxv16f16.i64(<vscale x 16 x half> [[VD]], <vscale x 16 x half> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]] +// +vfloat16m4_t test_sf_vfexpa_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, +                                         vfloat16m4_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_sf_vfexpa_v_f16m8_tumu( +// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vfexpa.mask.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]] +// +vfloat16m8_t test_sf_vfexpa_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, +                                         vfloat16m8_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_sf_vfexpa_v_f32mf2_tumu( +// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vfexpa.mask.nxv1f32.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]] +// +vfloat32mf2_t test_sf_vfexpa_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, +                                           vfloat32mf2_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_sf_vfexpa_v_f32m1_tumu( +// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vfexpa.mask.nxv2f32.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]] +// +vfloat32m1_t test_sf_vfexpa_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, +                                         vfloat32m1_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_sf_vfexpa_v_f32m2_tumu( +// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vfexpa.mask.nxv4f32.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]] +// +vfloat32m2_t test_sf_vfexpa_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, +                                         vfloat32m2_t vs2, size_t vl) { +  return 
__riscv_sf_vfexpa_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_sf_vfexpa_v_f32m4_tumu( +// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vfexpa.mask.nxv8f32.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]] +// +vfloat32m4_t test_sf_vfexpa_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, +                                         vfloat32m4_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_sf_vfexpa_v_f32m8_tumu( +// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vfexpa.mask.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]] +// +vfloat32m8_t test_sf_vfexpa_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, +                                         vfloat32m8_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_sf_vfexpa_v_f16mf4_mu( +// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vfexpa.mask.nxv1f16.i64(<vscale x 1 x half> [[VD]], <vscale x 1 x half> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]] +// +vfloat16mf4_t test_sf_vfexpa_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, +                                         vfloat16mf4_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_sf_vfexpa_v_f16mf2_mu( +// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vfexpa.mask.nxv2f16.i64(<vscale x 2 x half> [[VD]], <vscale x 2 x half> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]] +// +vfloat16mf2_t test_sf_vfexpa_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, +                                         vfloat16mf2_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_sf_vfexpa_v_f16m1_mu( +// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vfexpa.mask.nxv4f16.i64(<vscale x 4 x half> [[VD]], <vscale x 4 x half> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]] 
+// +vfloat16m1_t test_sf_vfexpa_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, +                                       vfloat16m1_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_sf_vfexpa_v_f16m2_mu( +// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vfexpa.mask.nxv8f16.i64(<vscale x 8 x half> [[VD]], <vscale x 8 x half> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]] +// +vfloat16m2_t test_sf_vfexpa_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, +                                       vfloat16m2_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_sf_vfexpa_v_f16m4_mu( +// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vfexpa.mask.nxv16f16.i64(<vscale x 16 x half> [[VD]], <vscale x 16 x half> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]] +// +vfloat16m4_t test_sf_vfexpa_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, +                                       vfloat16m4_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_sf_vfexpa_v_f16m8_mu( +// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vfexpa.mask.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]] +// +vfloat16m8_t test_sf_vfexpa_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, +                                       vfloat16m8_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_sf_vfexpa_v_f32mf2_mu( +// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vfexpa.mask.nxv1f32.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]] +// +vfloat32mf2_t test_sf_vfexpa_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, +                                         vfloat32mf2_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_sf_vfexpa_v_f32m1_mu( +// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vfexpa.mask.nxv2f32.i64(<vscale x 2 x float> 
[[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]] +// +vfloat32m1_t test_sf_vfexpa_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, +                                       vfloat32m1_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_sf_vfexpa_v_f32m2_mu( +// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vfexpa.mask.nxv4f32.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]] +// +vfloat32m2_t test_sf_vfexpa_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, +                                       vfloat32m2_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_sf_vfexpa_v_f32m4_mu( +// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vfexpa.mask.nxv8f32.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]] +// +vfloat32m4_t test_sf_vfexpa_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, +                                       vfloat32m4_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_sf_vfexpa_v_f32m8_mu( +// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vfexpa.mask.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]] +// +vfloat32m8_t test_sf_vfexpa_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, +                                       vfloat32m8_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_mu(vm, vd, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/policy/overloaded/sf_vfexpa_v_64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/policy/overloaded/sf_vfexpa_v_64.c new file mode 100644 index 0000000..d0faaee5 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-sifive/policy/overloaded/sf_vfexpa_v_64.c @@ -0,0 +1,183 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +xsfvfexpa64e \ +// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN:   FileCheck --check-prefix=CHECK-RV64 %s + +#include <sifive_vector.h> + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_sf_vfexpa_v_f64m1_tu( +// CHECK-RV64-SAME: <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    
[[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vfexpa.nxv1f64.i64(<vscale x 1 x double> [[VD]], <vscale x 1 x double> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]] +// +vfloat64m1_t test_sf_vfexpa_v_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, +                                       size_t vl) { +  return __riscv_sf_vfexpa_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_sf_vfexpa_v_f64m2_tu( +// CHECK-RV64-SAME: <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vfexpa.nxv2f64.i64(<vscale x 2 x double> [[VD]], <vscale x 2 x double> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]] +// +vfloat64m2_t test_sf_vfexpa_v_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, +                                       size_t vl) { +  return __riscv_sf_vfexpa_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_sf_vfexpa_v_f64m4_tu( +// CHECK-RV64-SAME: <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vfexpa.nxv4f64.i64(<vscale x 4 x double> [[VD]], <vscale x 4 x double> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]] +// +vfloat64m4_t test_sf_vfexpa_v_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, +                                       size_t vl) { +  return __riscv_sf_vfexpa_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_sf_vfexpa_v_f64m8_tu( +// CHECK-RV64-SAME: <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vfexpa.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]] +// +vfloat64m8_t test_sf_vfexpa_v_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, +                                       size_t vl) { +  return __riscv_sf_vfexpa_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_sf_vfexpa_v_f64m1_tum( +// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vfexpa.mask.nxv1f64.i64(<vscale x 1 x double> [[VD]], <vscale x 1 x double> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]] +// +vfloat64m1_t test_sf_vfexpa_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, +                                        vfloat64m1_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_sf_vfexpa_v_f64m2_tum( +// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vfexpa.mask.nxv2f64.i64(<vscale x 2 x double> [[VD]], <vscale x 2 x double> 
[[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]] +// +vfloat64m2_t test_sf_vfexpa_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, +                                        vfloat64m2_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_sf_vfexpa_v_f64m4_tum( +// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vfexpa.mask.nxv4f64.i64(<vscale x 4 x double> [[VD]], <vscale x 4 x double> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]] +// +vfloat64m4_t test_sf_vfexpa_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, +                                        vfloat64m4_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_sf_vfexpa_v_f64m8_tum( +// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vfexpa.mask.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]] +// +vfloat64m8_t test_sf_vfexpa_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, +                                        vfloat64m8_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_sf_vfexpa_v_f64m1_tumu( +// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vfexpa.mask.nxv1f64.i64(<vscale x 1 x double> [[VD]], <vscale x 1 x double> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]] +// +vfloat64m1_t test_sf_vfexpa_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, +                                         vfloat64m1_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_sf_vfexpa_v_f64m2_tumu( +// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vfexpa.mask.nxv2f64.i64(<vscale x 2 x double> [[VD]], <vscale x 2 x double> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]] +// +vfloat64m2_t test_sf_vfexpa_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, +                                         vfloat64m2_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_sf_vfexpa_v_f64m4_tumu( +// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vfexpa.mask.nxv4f64.i64(<vscale x 4 x double> [[VD]], <vscale x 4 x double> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]] +// +vfloat64m4_t test_sf_vfexpa_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, +                                         vfloat64m4_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_sf_vfexpa_v_f64m8_tumu( +// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vfexpa.mask.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]] +// +vfloat64m8_t test_sf_vfexpa_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, +                                         vfloat64m8_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_sf_vfexpa_v_f64m1_mu( +// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vfexpa.mask.nxv1f64.i64(<vscale x 1 x double> [[VD]], <vscale x 1 x double> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]] +// +vfloat64m1_t test_sf_vfexpa_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, +                                       vfloat64m1_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_sf_vfexpa_v_f64m2_mu( +// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vfexpa.mask.nxv2f64.i64(<vscale x 2 x double> [[VD]], <vscale x 2 x double> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]] +// +vfloat64m2_t test_sf_vfexpa_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, +                                       vfloat64m2_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_sf_vfexpa_v_f64m4_mu( +// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vfexpa.mask.nxv4f64.i64(<vscale x 4 x double> [[VD]], <vscale x 4 x double> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]] +// +vfloat64m4_t test_sf_vfexpa_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, +                                       vfloat64m4_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local <vscale x 
8 x double> @test_sf_vfexpa_v_f64m8_mu( +// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT:  entry: +// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vfexpa.mask.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]] +// +vfloat64m8_t test_sf_vfexpa_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, +                                       vfloat64m8_t vs2, size_t vl) { +  return __riscv_sf_vfexpa_mu(vm, vd, vs2, vl); +} diff --git a/clang/test/CodeGen/SystemZ/builtins-systemz-zvector.c b/clang/test/CodeGen/SystemZ/builtins-systemz-zvector.c index d5d15b4d..35fde87 100644 --- a/clang/test/CodeGen/SystemZ/builtins-systemz-zvector.c +++ b/clang/test/CodeGen/SystemZ/builtins-systemz-zvector.c @@ -3584,13 +3584,13 @@ void test_integer(void) {    // CHECK-ASM: vsrlb    vsc = vec_abs(vsc); -  // CHECK-ASM: vlcb +  // CHECK-ASM: vlpb    vss = vec_abs(vss); -  // CHECK-ASM: vlch +  // CHECK-ASM: vlph    vsi = vec_abs(vsi); -  // CHECK-ASM: vlcf +  // CHECK-ASM: vlpf    vsl = vec_abs(vsl); -  // CHECK-ASM: vlcg +  // CHECK-ASM: vlpg    vsc = vec_max(vsc, vsc);    // CHECK-ASM: vmxb diff --git a/clang/test/CodeGen/SystemZ/builtins-systemz-zvector5.c b/clang/test/CodeGen/SystemZ/builtins-systemz-zvector5.c index 6ee9e1e..cd0fafd 100644 --- a/clang/test/CodeGen/SystemZ/builtins-systemz-zvector5.c +++ b/clang/test/CodeGen/SystemZ/builtins-systemz-zvector5.c @@ -246,7 +246,7 @@ void test_integer(void) {    // CHECK-ASM: vctzq    vslll = vec_abs(vslll); -  // CHECK-ASM: vlcq +  // CHECK-ASM: vlpq    vslll = vec_avg(vslll, vslll);    // CHECK: call i128 @llvm.s390.vavgq(i128 %{{.*}}, i128 %{{.*}}) diff --git a/clang/test/CodeGen/X86/math-builtins.c b/clang/test/CodeGen/X86/math-builtins.c index a56f8ba..c7cd9ff 100644 --- a/clang/test/CodeGen/X86/math-builtins.c +++ b/clang/test/CodeGen/X86/math-builtins.c @@ -118,36 +118,36 @@ void foo(double *d, float f, float *fp, long double *l, int *i, const char *c) {    __builtin_copysign(f,f); __builtin_copysignf(f,f); __builtin_copysignl(f,f); __builtin_copysignf128(f,f); -// NO__ERRNO: declare double @llvm.copysign.f64(double, double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float @llvm.copysign.f32(float, float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 @llvm.copysign.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare fp128 @llvm.copysign.f128(fp128, fp128) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double @llvm.copysign.f64(double, double) [[READNONE_INTRINSIC:#[0-9]+]] -// HAS_ERRNO: declare float @llvm.copysign.f32(float, float) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare x86_fp80 @llvm.copysign.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare fp128 @llvm.copysign.f128(fp128, fp128) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.copysign.f64(double, double) [[READNONE_INTRINSIC2:#[0-9]+]] +// NO__ERRNO: declare float @llvm.copysign.f32(float, float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.copysign.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare fp128 @llvm.copysign.f128(fp128, fp128) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare double @llvm.copysign.f64(double, double) [[READNONE_INTRINSIC2:#[0-9]+]] +// HAS_ERRNO: declare float @llvm.copysign.f32(float, float) 
[[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare x86_fp80 @llvm.copysign.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare fp128 @llvm.copysign.f128(fp128, fp128) [[READNONE_INTRINSIC2]]    __builtin_fabs(f);       __builtin_fabsf(f);      __builtin_fabsl(f); __builtin_fabsf128(f); -// NO__ERRNO: declare double @llvm.fabs.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float @llvm.fabs.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 @llvm.fabs.f80(x86_fp80) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare fp128 @llvm.fabs.f128(fp128) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double @llvm.fabs.f64(double) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare float @llvm.fabs.f32(float) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare x86_fp80 @llvm.fabs.f80(x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare fp128 @llvm.fabs.f128(fp128) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.fabs.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare float @llvm.fabs.f32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.fabs.f80(x86_fp80) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare fp128 @llvm.fabs.f128(fp128) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare double @llvm.fabs.f64(double) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare float @llvm.fabs.f32(float) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare x86_fp80 @llvm.fabs.f80(x86_fp80) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare fp128 @llvm.fabs.f128(fp128) [[READNONE_INTRINSIC2]]    __builtin_frexp(f,i);    __builtin_frexpf(f,i);   __builtin_frexpl(f,i); __builtin_frexpf128(f,i); -// NO__ERRNO: declare { double, i32 } @llvm.frexp.f64.i32(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare { float, i32 } @llvm.frexp.f32.i32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare { x86_fp80, i32 } @llvm.frexp.f80.i32(x86_fp80) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare { fp128, i32 } @llvm.frexp.f128.i32(fp128) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare { double, i32 } @llvm.frexp.f64.i32(double) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare { float, i32 } @llvm.frexp.f32.i32(float) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare { x86_fp80, i32 } @llvm.frexp.f80.i32(x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare { fp128, i32 } @llvm.frexp.f128.i32(fp128) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare { double, i32 } @llvm.frexp.f64.i32(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare { float, i32 } @llvm.frexp.f32.i32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare { x86_fp80, i32 } @llvm.frexp.f80.i32(x86_fp80) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare { fp128, i32 } @llvm.frexp.f128.i32(fp128) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare { double, i32 } @llvm.frexp.f64.i32(double) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare { float, i32 } @llvm.frexp.f32.i32(float) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare { x86_fp80, i32 } @llvm.frexp.f80.i32(x86_fp80) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare { fp128, i32 } @llvm.frexp.f128.i32(fp128) [[READNONE_INTRINSIC2]]    __builtin_huge_val();    __builtin_huge_valf();   __builtin_huge_vall(); __builtin_huge_valf128(); @@ -165,10 +165,10 @@ void foo(double *d, float f, float *fp, long double *l, int *i, const char *c) {    __builtin_ldexp(f,f);    __builtin_ldexpf(f,f);   __builtin_ldexpl(f,f);  __builtin_ldexpf128(f,f); -// NO__ERRNO: declare double @llvm.ldexp.f64.i32(double, i32) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float @llvm.ldexp.f32.i32(float, i32) 
[[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 @llvm.ldexp.f80.i32(x86_fp80, i32) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare fp128 @llvm.ldexp.f128.i32(fp128, i32) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.ldexp.f64.i32(double, i32) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare float @llvm.ldexp.f32.i32(float, i32) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.ldexp.f80.i32(x86_fp80, i32) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare fp128 @llvm.ldexp.f128.i32(fp128, i32) [[READNONE_INTRINSIC2]]  // HAS_ERRNO: declare double @ldexp(double noundef, i32 noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare float @ldexpf(float noundef, i32 noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare x86_fp80 @ldexpl(x86_fp80 noundef, i32 noundef) [[NOT_READNONE]] @@ -180,7 +180,7 @@ void foo(double *d, float f, float *fp, long double *l, int *i, const char *c) {  // NO__ERRNO: declare { float, float } @llvm.modf.f32(float) [[READNONE_INTRINSIC]]  // NO__ERRNO: declare { x86_fp80, x86_fp80 } @llvm.modf.f80(x86_fp80) [[READNONE_INTRINSIC]]  // NO__ERRNO: declare fp128 @modff128(fp128 noundef, ptr noundef) [[NOT_READNONE:#[0-9]+]] -// HAS_ERRNO: declare { double, double } @llvm.modf.f64(double) [[READNONE_INTRINSIC]] +// HAS_ERRNO: declare { double, double } @llvm.modf.f64(double) [[READNONE_INTRINSIC:#[0-9]+]]  // HAS_ERRNO: declare { float, float } @llvm.modf.f32(float) [[READNONE_INTRINSIC]]  // HAS_ERRNO: declare { x86_fp80, x86_fp80 } @llvm.modf.f80(x86_fp80) [[READNONE_INTRINSIC]]  // HAS_ERRNO: declare fp128 @modff128(fp128 noundef, ptr noundef) [[NOT_READNONE]] @@ -209,10 +209,10 @@ void foo(double *d, float f, float *fp, long double *l, int *i, const char *c) {    __builtin_pow(f,f);        __builtin_powf(f,f);       __builtin_powl(f,f); __builtin_powf128(f,f); -// NO__ERRNO: declare double @llvm.pow.f64(double, double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float @llvm.pow.f32(float, float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 @llvm.pow.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare fp128 @llvm.pow.f128(fp128, fp128) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.pow.f64(double, double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare float @llvm.pow.f32(float, float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.pow.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare fp128 @llvm.pow.f128(fp128, fp128) [[READNONE_INTRINSIC2]]  // HAS_ERRNO: declare double @pow(double noundef, double noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare float @powf(float noundef, float noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare x86_fp80 @powl(x86_fp80 noundef, x86_fp80 noundef) [[NOT_READNONE]] @@ -220,12 +220,12 @@ void foo(double *d, float f, float *fp, long double *l, int *i, const char *c) {    __builtin_powi(f,f);        __builtin_powif(f,f);       __builtin_powil(f,f); -// NO__ERRNO: declare double @llvm.powi.f64.i32(double, i32) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float @llvm.powi.f32.i32(float, i32) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 @llvm.powi.f80.i32(x86_fp80, i32) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double @llvm.powi.f64.i32(double, i32) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare float @llvm.powi.f32.i32(float, i32) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare x86_fp80 @llvm.powi.f80.i32(x86_fp80, i32) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.powi.f64.i32(double, i32) [[READNONE_INTRINSIC2]] +// NO__ERRNO: 
declare float @llvm.powi.f32.i32(float, i32) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.powi.f80.i32(x86_fp80, i32) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare double @llvm.powi.f64.i32(double, i32) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare float @llvm.powi.f32.i32(float, i32) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare x86_fp80 @llvm.powi.f80.i32(x86_fp80, i32) [[READNONE_INTRINSIC2]]    /* math */    __builtin_acos(f);       __builtin_acosf(f);      __builtin_acosl(f); __builtin_acosf128(f); @@ -307,21 +307,21 @@ void foo(double *d, float f, float *fp, long double *l, int *i, const char *c) {    __builtin_ceil(f);       __builtin_ceilf(f);      __builtin_ceill(f); __builtin_ceilf128(f); -// NO__ERRNO: declare double @llvm.ceil.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float @llvm.ceil.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 @llvm.ceil.f80(x86_fp80) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare fp128 @llvm.ceil.f128(fp128) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double @llvm.ceil.f64(double) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare float @llvm.ceil.f32(float) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare x86_fp80 @llvm.ceil.f80(x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare fp128 @llvm.ceil.f128(fp128) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.ceil.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare float @llvm.ceil.f32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.ceil.f80(x86_fp80) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare fp128 @llvm.ceil.f128(fp128) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare double @llvm.ceil.f64(double) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare float @llvm.ceil.f32(float) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare x86_fp80 @llvm.ceil.f80(x86_fp80) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare fp128 @llvm.ceil.f128(fp128) [[READNONE_INTRINSIC2]]    __builtin_cos(f);        __builtin_cosf(f);       __builtin_cosl(f); __builtin_cosf128(f); -// NO__ERRNO: declare double @llvm.cos.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float @llvm.cos.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 @llvm.cos.f80(x86_fp80) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare fp128 @llvm.cos.f128(fp128) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.cos.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare float @llvm.cos.f32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.cos.f80(x86_fp80) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare fp128 @llvm.cos.f128(fp128) [[READNONE_INTRINSIC2]]  // HAS_ERRNO: declare double @cos(double noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare float @cosf(float noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare x86_fp80 @cosl(x86_fp80 noundef) [[NOT_READNONE]] @@ -362,10 +362,10 @@ __builtin_erfc(f);       __builtin_erfcf(f);      __builtin_erfcl(f); __builtin_  __builtin_exp(f);        __builtin_expf(f);       __builtin_expl(f); __builtin_expf128(f); -// NO__ERRNO: declare double @llvm.exp.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float @llvm.exp.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 @llvm.exp.f80(x86_fp80) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare fp128 @llvm.exp.f128(fp128) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.exp.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare float @llvm.exp.f32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 
@llvm.exp.f80(x86_fp80) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare fp128 @llvm.exp.f128(fp128) [[READNONE_INTRINSIC2]]  // HAS_ERRNO: declare double @exp(double noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare float @expf(float noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare x86_fp80 @expl(x86_fp80 noundef) [[NOT_READNONE]] @@ -373,10 +373,10 @@ __builtin_exp(f);        __builtin_expf(f);       __builtin_expl(f); __builtin_e  __builtin_exp2(f);       __builtin_exp2f(f);      __builtin_exp2l(f); __builtin_exp2f128(f); -// NO__ERRNO: declare double @llvm.exp2.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float @llvm.exp2.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 @llvm.exp2.f80(x86_fp80) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare fp128 @llvm.exp2.f128(fp128) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.exp2.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare float @llvm.exp2.f32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.exp2.f80(x86_fp80) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare fp128 @llvm.exp2.f128(fp128) [[READNONE_INTRINSIC2]]  // HAS_ERRNO: declare double @exp2(double noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare float @exp2f(float noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare x86_fp80 @exp2l(x86_fp80 noundef) [[NOT_READNONE]] @@ -384,10 +384,10 @@ __builtin_exp2(f);       __builtin_exp2f(f);      __builtin_exp2l(f); __builtin_  __builtin_exp10(f);       __builtin_exp10f(f);      __builtin_exp10l(f); __builtin_exp10f128(f); -// NO__ERRNO: declare double @llvm.exp10.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float @llvm.exp10.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 @llvm.exp10.f80(x86_fp80) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare fp128 @llvm.exp10.f128(fp128) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.exp10.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare float @llvm.exp10.f32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.exp10.f80(x86_fp80) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare fp128 @llvm.exp10.f128(fp128) [[READNONE_INTRINSIC2]]  // HAS_ERRNO: declare double @exp10(double noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare float @exp10f(float noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare x86_fp80 @exp10l(x86_fp80 noundef) [[NOT_READNONE]] @@ -417,22 +417,22 @@ __builtin_fdim(f,f);       __builtin_fdimf(f,f);      __builtin_fdiml(f,f); __bu  __builtin_floor(f);      __builtin_floorf(f);     __builtin_floorl(f); __builtin_floorf128(f); -// NO__ERRNO: declare double @llvm.floor.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float @llvm.floor.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 @llvm.floor.f80(x86_fp80) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare fp128 @llvm.floor.f128(fp128) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double @llvm.floor.f64(double) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare float @llvm.floor.f32(float) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare x86_fp80 @llvm.floor.f80(x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare fp128 @llvm.floor.f128(fp128) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.floor.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare float @llvm.floor.f32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.floor.f80(x86_fp80) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare fp128 @llvm.floor.f128(fp128) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare double 
@llvm.floor.f64(double) [[READNONE_INTRINSIC2]]
+// HAS_ERRNO: declare float @llvm.floor.f32(float) [[READNONE_INTRINSIC2]]
+// HAS_ERRNO: declare x86_fp80 @llvm.floor.f80(x86_fp80) [[READNONE_INTRINSIC2]]
+// HAS_ERRNO: declare fp128 @llvm.floor.f128(fp128) [[READNONE_INTRINSIC2]]

 __builtin_fma(f,f,f);        __builtin_fmaf(f,f,f);       __builtin_fmal(f,f,f); __builtin_fmaf128(f,f,f);  __builtin_fmaf16(f,f,f);

-// NO__ERRNO: declare double @llvm.fma.f64(double, double, double) [[READNONE_INTRINSIC]]
-// NO__ERRNO: declare float @llvm.fma.f32(float, float, float) [[READNONE_INTRINSIC]]
-// NO__ERRNO: declare x86_fp80 @llvm.fma.f80(x86_fp80, x86_fp80, x86_fp80) [[READNONE_INTRINSIC]]
-// NO__ERRNO: declare fp128 @llvm.fma.f128(fp128, fp128, fp128) [[READNONE_INTRINSIC]]
-// NO__ERRNO: declare half @llvm.fma.f16(half, half, half) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare double @llvm.fma.f64(double, double, double) [[READNONE_INTRINSIC2]]
+// NO__ERRNO: declare float @llvm.fma.f32(float, float, float) [[READNONE_INTRINSIC2]]
+// NO__ERRNO: declare x86_fp80 @llvm.fma.f80(x86_fp80, x86_fp80, x86_fp80) [[READNONE_INTRINSIC2]]
+// NO__ERRNO: declare fp128 @llvm.fma.f128(fp128, fp128, fp128) [[READNONE_INTRINSIC2]]
+// NO__ERRNO: declare half @llvm.fma.f16(half, half, half) [[READNONE_INTRINSIC2]]
 // HAS_ERRNO: declare double @fma(double noundef, double noundef, double noundef) [[NOT_READNONE]]
 // HAS_ERRNO: declare float @fmaf(float noundef, float noundef, float noundef) [[NOT_READNONE]]
 // HAS_ERRNO: declare x86_fp80 @fmal(x86_fp80 noundef, x86_fp80 noundef, x86_fp80 noundef) [[NOT_READNONE]]
@@ -454,25 +454,25 @@ __builtin_fma(f,f,f);        __builtin_fmaf(f,f,f);       __builtin_fmal(f,f,f);

 __builtin_fmax(f,f);       __builtin_fmaxf(f,f);      __builtin_fmaxl(f,f); __builtin_fmaxf128(f,f);

-// NO__ERRNO: declare double @llvm.maxnum.f64(double, double) [[READNONE_INTRINSIC]]
-// NO__ERRNO: declare float @llvm.maxnum.f32(float, float) [[READNONE_INTRINSIC]]
-// NO__ERRNO: declare x86_fp80 @llvm.maxnum.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]]
-// NO__ERRNO: declare fp128 @llvm.maxnum.f128(fp128, fp128) [[READNONE_INTRINSIC]]
-// HAS_ERRNO: declare double @llvm.maxnum.f64(double, double) [[READNONE_INTRINSIC]]
-// HAS_ERRNO: declare float @llvm.maxnum.f32(float, float) [[READNONE_INTRINSIC]]
-// HAS_ERRNO: declare x86_fp80 @llvm.maxnum.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]]
-// HAS_ERRNO: declare fp128 @llvm.maxnum.f128(fp128, fp128) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare double @llvm.maxnum.f64(double, double) [[READNONE_INTRINSIC2]]
+// NO__ERRNO: declare float @llvm.maxnum.f32(float, float) [[READNONE_INTRINSIC2]]
+// NO__ERRNO: declare x86_fp80 @llvm.maxnum.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC2]]
+// NO__ERRNO: declare fp128 @llvm.maxnum.f128(fp128, fp128) [[READNONE_INTRINSIC2]]
+// HAS_ERRNO: declare double @llvm.maxnum.f64(double, double) [[READNONE_INTRINSIC2]]
+// HAS_ERRNO: declare float @llvm.maxnum.f32(float, float) [[READNONE_INTRINSIC2]]
+// HAS_ERRNO: declare x86_fp80 @llvm.maxnum.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC2]]
+// HAS_ERRNO: declare fp128 @llvm.maxnum.f128(fp128, fp128) [[READNONE_INTRINSIC2]]

 __builtin_fmin(f,f);       __builtin_fminf(f,f);      __builtin_fminl(f,f); __builtin_fminf128(f,f);

-// NO__ERRNO: declare double @llvm.minnum.f64(double, double) [[READNONE_INTRINSIC]]
-// NO__ERRNO: declare float @llvm.minnum.f32(float, float) [[READNONE_INTRINSIC]]
-// NO__ERRNO: declare x86_fp80 @llvm.minnum.f80(x86_fp80,
x86_fp80) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare fp128 @llvm.minnum.f128(fp128, fp128) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double @llvm.minnum.f64(double, double) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare float @llvm.minnum.f32(float, float) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare x86_fp80 @llvm.minnum.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare fp128 @llvm.minnum.f128(fp128, fp128) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.minnum.f64(double, double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare float @llvm.minnum.f32(float, float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.minnum.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare fp128 @llvm.minnum.f128(fp128, fp128) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare double @llvm.minnum.f64(double, double) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare float @llvm.minnum.f32(float, float) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare x86_fp80 @llvm.minnum.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare fp128 @llvm.minnum.f128(fp128, fp128) [[READNONE_INTRINSIC2]]  __builtin_hypot(f,f);      __builtin_hypotf(f,f);     __builtin_hypotl(f,f); __builtin_hypotf128(f,f); @@ -509,10 +509,10 @@ __builtin_lgamma(f);     __builtin_lgammaf(f);    __builtin_lgammal(f); __builti  __builtin_llrint(f);     __builtin_llrintf(f);    __builtin_llrintl(f); __builtin_llrintf128(f); -// NO__ERRNO: declare i64 @llvm.llrint.i64.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare i64 @llvm.llrint.i64.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare i64 @llvm.llrint.i64.f80(x86_fp80) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare i64 @llvm.llrint.i64.f128(fp128) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare i64 @llvm.llrint.i64.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare i64 @llvm.llrint.i64.f32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare i64 @llvm.llrint.i64.f80(x86_fp80) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare i64 @llvm.llrint.i64.f128(fp128) [[READNONE_INTRINSIC2]]  // HAS_ERRNO: declare i64 @llrint(double noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare i64 @llrintf(float noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare i64 @llrintl(x86_fp80 noundef) [[NOT_READNONE]] @@ -520,10 +520,10 @@ __builtin_llrint(f);     __builtin_llrintf(f);    __builtin_llrintl(f); __builti  __builtin_llround(f);    __builtin_llroundf(f);   __builtin_llroundl(f); __builtin_llroundf128(f); -// NO__ERRNO: declare i64 @llvm.llround.i64.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare i64 @llvm.llround.i64.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare i64 @llvm.llround.i64.f80(x86_fp80) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare i64 @llvm.llround.i64.f128(fp128) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare i64 @llvm.llround.i64.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare i64 @llvm.llround.i64.f32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare i64 @llvm.llround.i64.f80(x86_fp80) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare i64 @llvm.llround.i64.f128(fp128) [[READNONE_INTRINSIC2]]  // HAS_ERRNO: declare i64 @llround(double noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare i64 @llroundf(float noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare i64 @llroundl(x86_fp80 noundef) [[NOT_READNONE]] @@ -531,10 +531,10 @@ __builtin_llround(f);    __builtin_llroundf(f);   __builtin_llroundl(f); __built  __builtin_log(f);        __builtin_logf(f);       __builtin_logl(f); 
__builtin_logf128(f); -// NO__ERRNO: declare double @llvm.log.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float @llvm.log.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 @llvm.log.f80(x86_fp80) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare fp128 @llvm.log.f128(fp128) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.log.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare float @llvm.log.f32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.log.f80(x86_fp80) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare fp128 @llvm.log.f128(fp128) [[READNONE_INTRINSIC2]]  // HAS_ERRNO: declare double @log(double noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare float @logf(float noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare x86_fp80 @logl(x86_fp80 noundef) [[NOT_READNONE]] @@ -542,10 +542,10 @@ __builtin_log(f);        __builtin_logf(f);       __builtin_logl(f); __builtin_l  __builtin_log10(f);      __builtin_log10f(f);     __builtin_log10l(f); __builtin_log10f128(f); -// NO__ERRNO: declare double @llvm.log10.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float @llvm.log10.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 @llvm.log10.f80(x86_fp80) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare fp128 @llvm.log10.f128(fp128) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.log10.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare float @llvm.log10.f32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.log10.f80(x86_fp80) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare fp128 @llvm.log10.f128(fp128) [[READNONE_INTRINSIC2]]  // HAS_ERRNO: declare double @log10(double noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare float @log10f(float noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare x86_fp80 @log10l(x86_fp80 noundef) [[NOT_READNONE]] @@ -564,10 +564,10 @@ __builtin_log1p(f);      __builtin_log1pf(f);     __builtin_log1pl(f); __builtin  __builtin_log2(f);       __builtin_log2f(f);      __builtin_log2l(f); __builtin_log2f128(f); -// NO__ERRNO: declare double @llvm.log2.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float @llvm.log2.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 @llvm.log2.f80(x86_fp80) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare fp128 @llvm.log2.f128(fp128) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.log2.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare float @llvm.log2.f32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.log2.f80(x86_fp80) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare fp128 @llvm.log2.f128(fp128) [[READNONE_INTRINSIC2]]  // HAS_ERRNO: declare double @log2(double noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare float @log2f(float noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare x86_fp80 @log2l(x86_fp80 noundef) [[NOT_READNONE]] @@ -586,10 +586,10 @@ __builtin_logb(f);       __builtin_logbf(f);      __builtin_logbl(f); __builtin_  __builtin_lrint(f);      __builtin_lrintf(f);     __builtin_lrintl(f); __builtin_lrintf128(f); -// NO__ERRNO: declare i64 @llvm.lrint.i64.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare i64 @llvm.lrint.i64.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare i64 @llvm.lrint.i64.f80(x86_fp80) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare i64 @llvm.lrint.i64.f128(fp128) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare i64 @llvm.lrint.i64.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare i64 @llvm.lrint.i64.f32(float) 
[[READNONE_INTRINSIC2]] +// NO__ERRNO: declare i64 @llvm.lrint.i64.f80(x86_fp80) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare i64 @llvm.lrint.i64.f128(fp128) [[READNONE_INTRINSIC2]]  // HAS_ERRNO: declare i64 @lrint(double noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare i64 @lrintf(float noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare i64 @lrintl(x86_fp80 noundef) [[NOT_READNONE]] @@ -597,10 +597,10 @@ __builtin_lrint(f);      __builtin_lrintf(f);     __builtin_lrintl(f); __builtin  __builtin_lround(f);     __builtin_lroundf(f);    __builtin_lroundl(f);  __builtin_lroundf128(f); -// NO__ERRNO: declare i64 @llvm.lround.i64.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare i64 @llvm.lround.i64.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare i64 @llvm.lround.i64.f80(x86_fp80) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare i64 @llvm.lround.i64.f128(fp128) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare i64 @llvm.lround.i64.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare i64 @llvm.lround.i64.f32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare i64 @llvm.lround.i64.f80(x86_fp80) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare i64 @llvm.lround.i64.f128(fp128) [[READNONE_INTRINSIC2]]  // HAS_ERRNO: declare i64 @lround(double noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare i64 @lroundf(float noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare i64 @lroundl(x86_fp80 noundef) [[NOT_READNONE]] @@ -608,14 +608,14 @@ __builtin_lround(f);     __builtin_lroundf(f);    __builtin_lroundl(f);  __built  __builtin_nearbyint(f);  __builtin_nearbyintf(f); __builtin_nearbyintl(f); __builtin_nearbyintf128(f); -// NO__ERRNO: declare double @llvm.nearbyint.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float @llvm.nearbyint.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 @llvm.nearbyint.f80(x86_fp80) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare fp128 @llvm.nearbyint.f128(fp128) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double @llvm.nearbyint.f64(double) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare float @llvm.nearbyint.f32(float) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare x86_fp80 @llvm.nearbyint.f80(x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare fp128 @llvm.nearbyint.f128(fp128) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.nearbyint.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare float @llvm.nearbyint.f32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.nearbyint.f80(x86_fp80) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare fp128 @llvm.nearbyint.f128(fp128) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare double @llvm.nearbyint.f64(double) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare float @llvm.nearbyint.f32(float) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare x86_fp80 @llvm.nearbyint.f80(x86_fp80) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare fp128 @llvm.nearbyint.f128(fp128) [[READNONE_INTRINSIC2]]  __builtin_nextafter(f,f);  __builtin_nextafterf(f,f); __builtin_nextafterl(f,f); __builtin_nextafterf128(f,f); @@ -663,25 +663,25 @@ __builtin_remquo(f,f,i);  __builtin_remquof(f,f,i); __builtin_remquol(f,f,i); __  __builtin_rint(f);       __builtin_rintf(f);      __builtin_rintl(f); __builtin_rintf128(f); -// NO__ERRNO: declare double @llvm.rint.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float @llvm.rint.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 @llvm.rint.f80(x86_fp80) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare fp128 @llvm.rint.f128(fp128) 
[[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double @llvm.rint.f64(double) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare float @llvm.rint.f32(float) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare x86_fp80 @llvm.rint.f80(x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare fp128 @llvm.rint.f128(fp128) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.rint.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare float @llvm.rint.f32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.rint.f80(x86_fp80) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare fp128 @llvm.rint.f128(fp128) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare double @llvm.rint.f64(double) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare float @llvm.rint.f32(float) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare x86_fp80 @llvm.rint.f80(x86_fp80) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare fp128 @llvm.rint.f128(fp128) [[READNONE_INTRINSIC2]]  __builtin_round(f);      __builtin_roundf(f);     __builtin_roundl(f); __builtin_roundf128(f); -// NO__ERRNO: declare double @llvm.round.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float @llvm.round.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 @llvm.round.f80(x86_fp80) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare fp128 @llvm.round.f128(fp128) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double @llvm.round.f64(double) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare float @llvm.round.f32(float) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare x86_fp80 @llvm.round.f80(x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare fp128 @llvm.round.f128(fp128) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.round.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare float @llvm.round.f32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.round.f80(x86_fp80) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare fp128 @llvm.round.f128(fp128) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare double @llvm.round.f64(double) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare float @llvm.round.f32(float) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare x86_fp80 @llvm.round.f80(x86_fp80) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare fp128 @llvm.round.f128(fp128) [[READNONE_INTRINSIC2]]  __builtin_scalbln(f,f);    __builtin_scalblnf(f,f);   __builtin_scalblnl(f,f); __builtin_scalblnf128(f,f); @@ -707,10 +707,10 @@ __builtin_scalbn(f,f);     __builtin_scalbnf(f,f);    __builtin_scalbnl(f,f); __  __builtin_sin(f);        __builtin_sinf(f);       __builtin_sinl(f); __builtin_sinf128(f); -// NO__ERRNO: declare double @llvm.sin.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float @llvm.sin.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 @llvm.sin.f80(x86_fp80) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare fp128 @llvm.sin.f128(fp128) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.sin.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare float @llvm.sin.f32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.sin.f80(x86_fp80) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare fp128 @llvm.sin.f128(fp128) [[READNONE_INTRINSIC2]]  // HAS_ERRNO: declare double @sin(double noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare float @sinf(float noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare x86_fp80 @sinl(x86_fp80 noundef) [[NOT_READNONE]] @@ -747,10 +747,10 @@ __builtin_sincospi(f,d,d); __builtin_sincospif(f,fp,fp); __builtin_sincospil(f,l  __builtin_sqrt(f);       __builtin_sqrtf(f);    
  __builtin_sqrtl(f); __builtin_sqrtf128(f); -// NO__ERRNO: declare double @llvm.sqrt.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float @llvm.sqrt.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 @llvm.sqrt.f80(x86_fp80) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare fp128 @llvm.sqrt.f128(fp128) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.sqrt.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare float @llvm.sqrt.f32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.sqrt.f80(x86_fp80) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare fp128 @llvm.sqrt.f128(fp128) [[READNONE_INTRINSIC2]]  // HAS_ERRNO: declare double @sqrt(double noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare float @sqrtf(float noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare x86_fp80 @sqrtl(x86_fp80 noundef) [[NOT_READNONE]] @@ -791,22 +791,24 @@ __builtin_tgamma(f);     __builtin_tgammaf(f);    __builtin_tgammal(f); __builti  __builtin_trunc(f);      __builtin_truncf(f);     __builtin_truncl(f); __builtin_truncf128(f); -// NO__ERRNO: declare double @llvm.trunc.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float @llvm.trunc.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 @llvm.trunc.f80(x86_fp80) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare fp128 @llvm.trunc.f128(fp128) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double @llvm.trunc.f64(double) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare float @llvm.trunc.f32(float) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare x86_fp80 @llvm.trunc.f80(x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare fp128 @llvm.trunc.f128(fp128) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.trunc.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare float @llvm.trunc.f32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.trunc.f80(x86_fp80) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare fp128 @llvm.trunc.f128(fp128) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare double @llvm.trunc.f64(double) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare float @llvm.trunc.f32(float) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare x86_fp80 @llvm.trunc.f80(x86_fp80) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare fp128 @llvm.trunc.f128(fp128) [[READNONE_INTRINSIC2]]  };  // NO__ERRNO: attributes [[READNONE_INTRINSIC]] = { {{.*}}memory(none){{.*}} } +// NO__ERRNO: attributes [[READNONE_INTRINSIC2]] = { {{.*}}memory(none){{.*}} }  // NO__ERRNO: attributes [[NOT_READNONE]] = { nounwind {{.*}} }  // NO__ERRNO: attributes [[PURE]] = { {{.*}}memory(read){{.*}} }  // NO__ERRNO: attributes [[READNONE]] = { {{.*}}memory(none){{.*}} }  // HAS_ERRNO: attributes [[NOT_READNONE]] = { nounwind {{.*}} } +// HAS_ERRNO: attributes [[READNONE_INTRINSIC2]] = { {{.*}}memory(none){{.*}} }  // HAS_ERRNO: attributes [[READNONE_INTRINSIC]] = { {{.*}}memory(none){{.*}} }  // HAS_ERRNO: attributes [[PURE]] = { {{.*}}memory(read){{.*}} }  // HAS_ERRNO: attributes [[READNONE]] = { {{.*}}memory(none){{.*}} } diff --git a/clang/test/CodeGen/builtin-sqrt.c b/clang/test/CodeGen/builtin-sqrt.c index 2313a68..3ebf2ac 100644 --- a/clang/test/CodeGen/builtin-sqrt.c +++ b/clang/test/CodeGen/builtin-sqrt.c @@ -11,5 +11,5 @@ float foo(float X) {  // HAS_ERRNO-NOT: attributes [[ATTR]] = {{{.*}} memory(none)  // NO_ERRNO: declare float @llvm.sqrt.f32(float) [[ATTR:#[0-9]+]] -// NO_ERRNO: attributes [[ATTR]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) } +// NO_ERRNO: attributes [[ATTR]] = { 
nocallback nocreateundeforpoison nofree nosync nounwind speculatable willreturn memory(none) } diff --git a/clang/test/CodeGen/libcalls.c b/clang/test/CodeGen/libcalls.c index 1e4b06e..923719f 100644 --- a/clang/test/CodeGen/libcalls.c +++ b/clang/test/CodeGen/libcalls.c @@ -74,9 +74,9 @@ void test_fma(float a0, double a1, long double a2) {  // CHECK-YES: declare float @fmaf(float noundef, float noundef, float noundef)  // CHECK-YES: declare double @fma(double noundef, double noundef, double noundef)  // CHECK-YES: declare x86_fp80 @fmal(x86_fp80 noundef, x86_fp80 noundef, x86_fp80 noundef) -// CHECK-NO: declare float @llvm.fma.f32(float, float, float) [[NUW_RN2:#[0-9]+]] -// CHECK-NO: declare double @llvm.fma.f64(double, double, double) [[NUW_RN2]] -// CHECK-NO: declare x86_fp80 @llvm.fma.f80(x86_fp80, x86_fp80, x86_fp80) [[NUW_RN2]] +// CHECK-NO: declare float @llvm.fma.f32(float, float, float) [[NUW_RNI]] +// CHECK-NO: declare double @llvm.fma.f64(double, double, double) [[NUW_RNI]] +// CHECK-NO: declare x86_fp80 @llvm.fma.f80(x86_fp80, x86_fp80, x86_fp80) [[NUW_RNI]]  // Just checking to make sure these library functions are marked readnone  void test_builtins(double d, float f, long double ld) { @@ -85,9 +85,9 @@ void test_builtins(double d, float f, long double ld) {    double atan_ = atan(d);    long double atanl_ = atanl(ld);    float atanf_ = atanf(f); -// CHECK-NO: declare double @llvm.atan.f64(double) [[NUW_RNI:#[0-9]+]] -// CHECK-NO: declare x86_fp80 @llvm.atan.f80(x86_fp80) [[NUW_RNI]] -// CHECK-NO: declare float @llvm.atan.f32(float) [[NUW_RNI]] +// CHECK-NO: declare double @llvm.atan.f64(double) [[NUW_RN2:#[0-9]+]] +// CHECK-NO: declare x86_fp80 @llvm.atan.f80(x86_fp80) [[NUW_RN2]] +// CHECK-NO: declare float @llvm.atan.f32(float) [[NUW_RN2]]  // CHECK-YES: declare double @atan(double noundef) [[NUW:#[0-9]+]]  // CHECK-YES: declare x86_fp80 @atanl(x86_fp80 noundef) [[NUW]]  // CHECK-YES: declare float @atanf(float noundef) [[NUW]] @@ -95,9 +95,9 @@ void test_builtins(double d, float f, long double ld) {    double atan2_ = atan2(d, 2);    long double atan2l_ = atan2l(ld, ld);    float atan2f_ = atan2f(f, f); -// CHECK-NO: declare double @llvm.atan2.f64(double, double) [[NUW_RNI]] -// CHECK-NO: declare x86_fp80 @llvm.atan2.f80(x86_fp80, x86_fp80) [[NUW_RNI]] -// CHECK-NO: declare float @llvm.atan2.f32(float, float) [[NUW_RNI]] +// CHECK-NO: declare double @llvm.atan2.f64(double, double) [[NUW_RN2]] +// CHECK-NO: declare x86_fp80 @llvm.atan2.f80(x86_fp80, x86_fp80) [[NUW_RN2]] +// CHECK-NO: declare float @llvm.atan2.f32(float, float) [[NUW_RN2]]  // CHECK-YES: declare double @atan2(double noundef, double noundef) [[NUW]]  // CHECK-YES: declare x86_fp80 @atan2l(x86_fp80 noundef, x86_fp80 noundef) [[NUW]]  // CHECK-YES: declare float @atan2f(float noundef, float noundef) [[NUW]] @@ -124,4 +124,4 @@ void test_builtins(double d, float f, long double ld) {  }  // CHECK-YES: attributes [[NUW]] = { nounwind "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+cx8,+x87" } -// CHECK-NO-DAG: attributes [[NUW_RNI]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) } +// CHECK-NO-DAG: attributes [[NUW_RNI]] = { nocallback nocreateundeforpoison nofree nosync nounwind speculatable willreturn memory(none) } diff --git a/clang/test/CodeGen/math-libcalls.c b/clang/test/CodeGen/math-libcalls.c index ad29782..d4cd6f86 100644 --- a/clang/test/CodeGen/math-libcalls.c +++ b/clang/test/CodeGen/math-libcalls.c @@ -35,27 +35,27 @@ void foo(double *d, 
float f, float *fp, long double *l, int *i, const char *c) {    copysign(f,f); copysignf(f,f);copysignl(f,f); -  // NO__ERRNO: declare double @llvm.copysign.f64(double, double) [[READNONE_INTRINSIC]] -  // NO__ERRNO: declare float @llvm.copysign.f32(float, float) [[READNONE_INTRINSIC]] -  // NO__ERRNO: declare x86_fp80 @llvm.copysign.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]] -  // HAS_ERRNO: declare double @llvm.copysign.f64(double, double) [[READNONE_INTRINSIC:#[0-9]+]] -  // HAS_ERRNO: declare float @llvm.copysign.f32(float, float) [[READNONE_INTRINSIC]] -  // HAS_ERRNO: declare x86_fp80 @llvm.copysign.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]] -  // HAS_MAYTRAP: declare double @llvm.copysign.f64(double, double) [[READNONE_INTRINSIC:#[0-9]+]] -  // HAS_MAYTRAP: declare float @llvm.copysign.f32(float, float) [[READNONE_INTRINSIC]] -  // HAS_MAYTRAP: declare x86_fp80 @llvm.copysign.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]] +  // NO__ERRNO: declare double @llvm.copysign.f64(double, double) [[READNONE_INTRINSIC2:#[0-9]+]] +  // NO__ERRNO: declare float @llvm.copysign.f32(float, float) [[READNONE_INTRINSIC2]] +  // NO__ERRNO: declare x86_fp80 @llvm.copysign.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC2]] +  // HAS_ERRNO: declare double @llvm.copysign.f64(double, double) [[READNONE_INTRINSIC2:#[0-9]+]] +  // HAS_ERRNO: declare float @llvm.copysign.f32(float, float) [[READNONE_INTRINSIC2]] +  // HAS_ERRNO: declare x86_fp80 @llvm.copysign.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC2]] +  // HAS_MAYTRAP: declare double @llvm.copysign.f64(double, double) [[READNONE_INTRINSIC2:#[0-9]+]] +  // HAS_MAYTRAP: declare float @llvm.copysign.f32(float, float) [[READNONE_INTRINSIC2]] +  // HAS_MAYTRAP: declare x86_fp80 @llvm.copysign.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC2]]    fabs(f);       fabsf(f);      fabsl(f); -  // NO__ERRNO: declare double @llvm.fabs.f64(double) [[READNONE_INTRINSIC]] -  // NO__ERRNO: declare float @llvm.fabs.f32(float) [[READNONE_INTRINSIC]] -  // NO__ERRNO: declare x86_fp80 @llvm.fabs.f80(x86_fp80) [[READNONE_INTRINSIC]] -  // HAS_ERRNO: declare double @llvm.fabs.f64(double) [[READNONE_INTRINSIC]] -  // HAS_ERRNO: declare float @llvm.fabs.f32(float) [[READNONE_INTRINSIC]] -  // HAS_ERRNO: declare x86_fp80 @llvm.fabs.f80(x86_fp80) [[READNONE_INTRINSIC]] -  // HAS_MAYTRAP: declare double @llvm.fabs.f64(double) [[READNONE_INTRINSIC]] -  // HAS_MAYTRAP: declare float @llvm.fabs.f32(float) [[READNONE_INTRINSIC]] -  // HAS_MAYTRAP: declare x86_fp80 @llvm.fabs.f80(x86_fp80) [[READNONE_INTRINSIC]] +  // NO__ERRNO: declare double @llvm.fabs.f64(double) [[READNONE_INTRINSIC2]] +  // NO__ERRNO: declare float @llvm.fabs.f32(float) [[READNONE_INTRINSIC2]] +  // NO__ERRNO: declare x86_fp80 @llvm.fabs.f80(x86_fp80) [[READNONE_INTRINSIC2]] +  // HAS_ERRNO: declare double @llvm.fabs.f64(double) [[READNONE_INTRINSIC2]] +  // HAS_ERRNO: declare float @llvm.fabs.f32(float) [[READNONE_INTRINSIC2]] +  // HAS_ERRNO: declare x86_fp80 @llvm.fabs.f80(x86_fp80) [[READNONE_INTRINSIC2]] +  // HAS_MAYTRAP: declare double @llvm.fabs.f64(double) [[READNONE_INTRINSIC2]] +  // HAS_MAYTRAP: declare float @llvm.fabs.f32(float) [[READNONE_INTRINSIC2]] +  // HAS_MAYTRAP: declare x86_fp80 @llvm.fabs.f80(x86_fp80) [[READNONE_INTRINSIC2]]    frexp(f,i);    frexpf(f,i);   frexpl(f,i); @@ -86,7 +86,7 @@ void foo(double *d, float f, float *fp, long double *l, int *i, const char *c) {    // NO__ERRNO: declare { double, double } @llvm.modf.f64(double) [[READNONE_INTRINSIC]]    // NO__ERRNO: declare { 
float, float } @llvm.modf.f32(float) [[READNONE_INTRINSIC]]    // NO__ERRNO: declare { x86_fp80, x86_fp80 } @llvm.modf.f80(x86_fp80) [[READNONE_INTRINSIC]] -  // HAS_ERRNO: declare { double, double } @llvm.modf.f64(double) [[READNONE_INTRINSIC]] +  // HAS_ERRNO: declare { double, double } @llvm.modf.f64(double) [[READNONE_INTRINSIC:#[0-9]+]]    // HAS_ERRNO: declare { float, float } @llvm.modf.f32(float) [[READNONE_INTRINSIC]]    // HAS_ERRNO: declare { x86_fp80, x86_fp80 } @llvm.modf.f80(x86_fp80) [[READNONE_INTRINSIC]]    // HAS_MAYTRAP: declare double @modf(double noundef, ptr noundef) [[NOT_READNONE]] @@ -107,9 +107,9 @@ void foo(double *d, float f, float *fp, long double *l, int *i, const char *c) {    pow(f,f);        powf(f,f);       powl(f,f); -// NO__ERRNO: declare double @llvm.pow.f64(double, double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float @llvm.pow.f32(float, float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 @llvm.pow.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.pow.f64(double, double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare float @llvm.pow.f32(float, float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.pow.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC2]]  // HAS_ERRNO: declare double @pow(double noundef, double noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare float @powf(float noundef, float noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare x86_fp80 @powl(x86_fp80 noundef, x86_fp80 noundef) [[NOT_READNONE]] @@ -206,21 +206,21 @@ void foo(double *d, float f, float *fp, long double *l, int *i, const char *c) {    ceil(f);       ceilf(f);      ceill(f); -// NO__ERRNO: declare double @llvm.ceil.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float @llvm.ceil.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 @llvm.ceil.f80(x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double @llvm.ceil.f64(double) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare float @llvm.ceil.f32(float) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare x86_fp80 @llvm.ceil.f80(x86_fp80) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.ceil.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare float @llvm.ceil.f32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.ceil.f80(x86_fp80) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare double @llvm.ceil.f64(double) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare float @llvm.ceil.f32(float) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare x86_fp80 @llvm.ceil.f80(x86_fp80) [[READNONE_INTRINSIC2]]  // HAS_MAYTRAP: declare double @llvm.experimental.constrained.ceil.f64(  // HAS_MAYTRAP: declare float @llvm.experimental.constrained.ceil.f32(  // HAS_MAYTRAP: declare x86_fp80 @llvm.experimental.constrained.ceil.f80(    cos(f);        cosf(f);       cosl(f); -// NO__ERRNO: declare double @llvm.cos.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float @llvm.cos.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 @llvm.cos.f80(x86_fp80) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.cos.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare float @llvm.cos.f32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.cos.f80(x86_fp80) [[READNONE_INTRINSIC2]]  // HAS_ERRNO: declare double @cos(double noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare float @cosf(float noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare x86_fp80 @cosl(x86_fp80 noundef) [[NOT_READNONE]] @@ -266,9 +266,9 @@ void 
foo(double *d, float f, float *fp, long double *l, int *i, const char *c) {    exp(f);        expf(f);       expl(f); -// NO__ERRNO: declare double @llvm.exp.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float @llvm.exp.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 @llvm.exp.f80(x86_fp80) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.exp.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare float @llvm.exp.f32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.exp.f80(x86_fp80) [[READNONE_INTRINSIC2]]  // HAS_ERRNO: declare double @exp(double noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare float @expf(float noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare x86_fp80 @expl(x86_fp80 noundef) [[NOT_READNONE]] @@ -278,9 +278,9 @@ void foo(double *d, float f, float *fp, long double *l, int *i, const char *c) {    exp2(f);       exp2f(f);      exp2l(f); -// NO__ERRNO: declare double @llvm.exp2.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float @llvm.exp2.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 @llvm.exp2.f80(x86_fp80) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.exp2.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare float @llvm.exp2.f32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.exp2.f80(x86_fp80) [[READNONE_INTRINSIC2]]  // HAS_ERRNO: declare double @exp2(double noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare float @exp2f(float noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare x86_fp80 @exp2l(x86_fp80 noundef) [[NOT_READNONE]] @@ -314,21 +314,21 @@ void foo(double *d, float f, float *fp, long double *l, int *i, const char *c) {    floor(f);      floorf(f);     floorl(f); -// NO__ERRNO: declare double @llvm.floor.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float @llvm.floor.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 @llvm.floor.f80(x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double @llvm.floor.f64(double) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare float @llvm.floor.f32(float) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare x86_fp80 @llvm.floor.f80(x86_fp80) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.floor.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare float @llvm.floor.f32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.floor.f80(x86_fp80) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare double @llvm.floor.f64(double) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare float @llvm.floor.f32(float) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare x86_fp80 @llvm.floor.f80(x86_fp80) [[READNONE_INTRINSIC2]]  // HAS_MAYTRAP: declare double @llvm.experimental.constrained.floor.f64  // HAS_MAYTRAP: declare float @llvm.experimental.constrained.floor.f32(  // HAS_MAYTRAP: declare x86_fp80 @llvm.experimental.constrained.floor.f80(    fma(f,f,f);        fmaf(f,f,f);       fmal(f,f,f); -// NO__ERRNO: declare double @llvm.fma.f64(double, double, double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float @llvm.fma.f32(float, float, float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 @llvm.fma.f80(x86_fp80, x86_fp80, x86_fp80) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.fma.f64(double, double, double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare float @llvm.fma.f32(float, float, float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.fma.f80(x86_fp80, x86_fp80, x86_fp80) [[READNONE_INTRINSIC2]]  // HAS_ERRNO: declare double 
@fma(double noundef, double noundef, double noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare float @fmaf(float noundef, float noundef, float noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare x86_fp80 @fmal(x86_fp80 noundef, x86_fp80 noundef, x86_fp80 noundef) [[NOT_READNONE]] @@ -350,39 +350,39 @@ void foo(double *d, float f, float *fp, long double *l, int *i, const char *c) {    fmax(f,f);       fmaxf(f,f);      fmaxl(f,f); -// NO__ERRNO: declare double @llvm.maxnum.f64(double, double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float @llvm.maxnum.f32(float, float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 @llvm.maxnum.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double @llvm.maxnum.f64(double, double) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare float @llvm.maxnum.f32(float, float) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare x86_fp80 @llvm.maxnum.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.maxnum.f64(double, double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare float @llvm.maxnum.f32(float, float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.maxnum.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare double @llvm.maxnum.f64(double, double) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare float @llvm.maxnum.f32(float, float) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare x86_fp80 @llvm.maxnum.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC2]]  // HAS_MAYTRAP: declare double @llvm.experimental.constrained.maxnum.f64(  // HAS_MAYTRAP: declare float @llvm.experimental.constrained.maxnum.f32(  // HAS_MAYTRAP: declare x86_fp80 @llvm.experimental.constrained.maxnum.f80(    fmin(f,f);       fminf(f,f);      fminl(f,f); -// NO__ERRNO: declare double @llvm.minnum.f64(double, double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float @llvm.minnum.f32(float, float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 @llvm.minnum.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double @llvm.minnum.f64(double, double) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare float @llvm.minnum.f32(float, float) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare x86_fp80 @llvm.minnum.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.minnum.f64(double, double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare float @llvm.minnum.f32(float, float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.minnum.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare double @llvm.minnum.f64(double, double) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare float @llvm.minnum.f32(float, float) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare x86_fp80 @llvm.minnum.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC2]]  // HAS_MAYTRAP: declare double @llvm.experimental.constrained.minnum.f64(  // HAS_MAYTRAP: declare float @llvm.experimental.constrained.minnum.f32(  // HAS_MAYTRAP: declare x86_fp80 @llvm.experimental.constrained.minnum.f80(    fmaximum_num(*d,*d);       fmaximum_numf(f,f);      fmaximum_numl(*l,*l); -// COMMON: declare double @llvm.maximumnum.f64(double, double) [[READNONE_INTRINSIC]] -// COMMON: declare float @llvm.maximumnum.f32(float, float) [[READNONE_INTRINSIC]] -// COMMON: declare x86_fp80 @llvm.maximumnum.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]] +// COMMON: declare double @llvm.maximumnum.f64(double, double) [[READNONE_INTRINSIC2]] +// COMMON: declare float @llvm.maximumnum.f32(float, float) [[READNONE_INTRINSIC2]] +// COMMON: 
declare x86_fp80 @llvm.maximumnum.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC2]]    fminimum_num(*d,*d);       fminimum_numf(f,f);      fminimum_numl(*l,*l); -// COMMON: declare double @llvm.minimumnum.f64(double, double) [[READNONE_INTRINSIC]] -// COMMON: declare float @llvm.minimumnum.f32(float, float) [[READNONE_INTRINSIC]] -// COMMON: declare x86_fp80 @llvm.minimumnum.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]] +// COMMON: declare double @llvm.minimumnum.f64(double, double) [[READNONE_INTRINSIC2]] +// COMMON: declare float @llvm.minimumnum.f32(float, float) [[READNONE_INTRINSIC2]] +// COMMON: declare x86_fp80 @llvm.minimumnum.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC2]]    hypot(f,f);      hypotf(f,f);     hypotl(f,f); @@ -422,9 +422,9 @@ void foo(double *d, float f, float *fp, long double *l, int *i, const char *c) {    llrint(f);     llrintf(f);    llrintl(f); -// NO__ERRNO: declare i64 @llvm.llrint.i64.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare i64 @llvm.llrint.i64.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare i64 @llvm.llrint.i64.f80(x86_fp80) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare i64 @llvm.llrint.i64.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare i64 @llvm.llrint.i64.f32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare i64 @llvm.llrint.i64.f80(x86_fp80) [[READNONE_INTRINSIC2]]  // HAS_ERRNO: declare i64 @llrint(double noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare i64 @llrintf(float noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare i64 @llrintl(x86_fp80 noundef) [[NOT_READNONE]] @@ -434,9 +434,9 @@ void foo(double *d, float f, float *fp, long double *l, int *i, const char *c) {    llround(f);    llroundf(f);   llroundl(f); -// NO__ERRNO: declare i64 @llvm.llround.i64.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare i64 @llvm.llround.i64.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare i64 @llvm.llround.i64.f80(x86_fp80) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare i64 @llvm.llround.i64.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare i64 @llvm.llround.i64.f32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare i64 @llvm.llround.i64.f80(x86_fp80) [[READNONE_INTRINSIC2]]  // HAS_ERRNO: declare i64 @llround(double noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare i64 @llroundf(float noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare i64 @llroundl(x86_fp80 noundef) [[NOT_READNONE]] @@ -446,9 +446,9 @@ void foo(double *d, float f, float *fp, long double *l, int *i, const char *c) {    log(f);        logf(f);       logl(f); -// NO__ERRNO: declare double @llvm.log.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float @llvm.log.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 @llvm.log.f80(x86_fp80) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.log.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare float @llvm.log.f32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.log.f80(x86_fp80) [[READNONE_INTRINSIC2]]  // HAS_ERRNO: declare double @log(double noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare float @logf(float noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare x86_fp80 @logl(x86_fp80 noundef) [[NOT_READNONE]] @@ -458,9 +458,9 @@ void foo(double *d, float f, float *fp, long double *l, int *i, const char *c) {    log10(f);      log10f(f);     log10l(f); -// NO__ERRNO: declare double @llvm.log10.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float @llvm.log10.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 
@llvm.log10.f80(x86_fp80) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.log10.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare float @llvm.log10.f32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.log10.f80(x86_fp80) [[READNONE_INTRINSIC2]]  // HAS_ERRNO: declare double @log10(double noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare float @log10f(float noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare x86_fp80 @log10l(x86_fp80 noundef) [[NOT_READNONE]] @@ -482,9 +482,9 @@ void foo(double *d, float f, float *fp, long double *l, int *i, const char *c) {    log2(f);       log2f(f);      log2l(f); -// NO__ERRNO: declare double @llvm.log2.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float @llvm.log2.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 @llvm.log2.f80(x86_fp80) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.log2.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare float @llvm.log2.f32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.log2.f80(x86_fp80) [[READNONE_INTRINSIC2]]  // HAS_ERRNO: declare double @log2(double noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare float @log2f(float noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare x86_fp80 @log2l(x86_fp80 noundef) [[NOT_READNONE]] @@ -506,9 +506,9 @@ void foo(double *d, float f, float *fp, long double *l, int *i, const char *c) {    lrint(f);      lrintf(f);     lrintl(f); -// NO__ERRNO: declare i64 @llvm.lrint.i64.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare i64 @llvm.lrint.i64.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare i64 @llvm.lrint.i64.f80(x86_fp80) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare i64 @llvm.lrint.i64.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare i64 @llvm.lrint.i64.f32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare i64 @llvm.lrint.i64.f80(x86_fp80) [[READNONE_INTRINSIC2]]  // HAS_ERRNO: declare i64 @lrint(double noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare i64 @lrintf(float noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare i64 @lrintl(x86_fp80 noundef) [[NOT_READNONE]] @@ -518,9 +518,9 @@ void foo(double *d, float f, float *fp, long double *l, int *i, const char *c) {    lround(f);     lroundf(f);    lroundl(f); -// NO__ERRNO: declare i64 @llvm.lround.i64.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare i64 @llvm.lround.i64.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare i64 @llvm.lround.i64.f80(x86_fp80) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare i64 @llvm.lround.i64.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare i64 @llvm.lround.i64.f32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare i64 @llvm.lround.i64.f80(x86_fp80) [[READNONE_INTRINSIC2]]  // HAS_ERRNO: declare i64 @lround(double noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare i64 @lroundf(float noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare i64 @lroundl(x86_fp80 noundef) [[NOT_READNONE]] @@ -530,12 +530,12 @@ void foo(double *d, float f, float *fp, long double *l, int *i, const char *c) {    nearbyint(f);  nearbyintf(f); nearbyintl(f); -// NO__ERRNO: declare double @llvm.nearbyint.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float @llvm.nearbyint.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 @llvm.nearbyint.f80(x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double @llvm.nearbyint.f64(double) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare float @llvm.nearbyint.f32(float) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare 
x86_fp80 @llvm.nearbyint.f80(x86_fp80) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.nearbyint.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare float @llvm.nearbyint.f32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.nearbyint.f80(x86_fp80) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare double @llvm.nearbyint.f64(double) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare float @llvm.nearbyint.f32(float) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare x86_fp80 @llvm.nearbyint.f80(x86_fp80) [[READNONE_INTRINSIC2]]  // HAS_MAYTRAP: declare double @llvm.experimental.constrained.nearbyint.f64(  // HAS_MAYTRAP: declare float @llvm.experimental.constrained.nearbyint.f32(  // HAS_MAYTRAP: declare x86_fp80 @llvm.experimental.constrained.nearbyint.f80( @@ -590,24 +590,24 @@ void foo(double *d, float f, float *fp, long double *l, int *i, const char *c) {    rint(f);       rintf(f);      rintl(f); -// NO__ERRNO: declare double @llvm.rint.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float @llvm.rint.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 @llvm.rint.f80(x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double @llvm.rint.f64(double) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare float @llvm.rint.f32(float) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare x86_fp80 @llvm.rint.f80(x86_fp80) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.rint.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare float @llvm.rint.f32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.rint.f80(x86_fp80) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare double @llvm.rint.f64(double) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare float @llvm.rint.f32(float) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare x86_fp80 @llvm.rint.f80(x86_fp80) [[READNONE_INTRINSIC2]]  // HAS_MAYTRAP: declare double @llvm.experimental.constrained.rint.f64(  // HAS_MAYTRAP: declare float @llvm.experimental.constrained.rint.f32(  // HAS_MAYTRAP: declare x86_fp80 @llvm.experimental.constrained.rint.f80(    round(f);      roundf(f);     roundl(f); -// NO__ERRNO: declare double @llvm.round.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float @llvm.round.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 @llvm.round.f80(x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double @llvm.round.f64(double) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare float @llvm.round.f32(float) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare x86_fp80 @llvm.round.f80(x86_fp80) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.round.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare float @llvm.round.f32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.round.f80(x86_fp80) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare double @llvm.round.f64(double) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare float @llvm.round.f32(float) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare x86_fp80 @llvm.round.f80(x86_fp80) [[READNONE_INTRINSIC2]]  // HAS_MAYTRAP: declare double @llvm.experimental.constrained.round.f64(  // HAS_MAYTRAP: declare float @llvm.experimental.constrained.round.f32(  // HAS_MAYTRAP: declare x86_fp80 @llvm.experimental.constrained.round.f80( @@ -638,9 +638,9 @@ void foo(double *d, float f, float *fp, long double *l, int *i, const char *c) {    sin(f);        sinf(f);       sinl(f); -// NO__ERRNO: declare double @llvm.sin.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float 
@llvm.sin.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 @llvm.sin.f80(x86_fp80) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.sin.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare float @llvm.sin.f32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.sin.f80(x86_fp80) [[READNONE_INTRINSIC2]]  // HAS_ERRNO: declare double @sin(double noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare float @sinf(float noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare x86_fp80 @sinl(x86_fp80 noundef) [[NOT_READNONE]] @@ -674,9 +674,9 @@ sincos(f, d, d);       sincosf(f, fp, fp);        sincosl(f, l, l);    sqrt(f);       sqrtf(f);      sqrtl(f); -// NO__ERRNO: declare double @llvm.sqrt.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float @llvm.sqrt.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 @llvm.sqrt.f80(x86_fp80) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.sqrt.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare float @llvm.sqrt.f32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.sqrt.f80(x86_fp80) [[READNONE_INTRINSIC2]]  // HAS_ERRNO: declare double @sqrt(double noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare float @sqrtf(float noundef) [[NOT_READNONE]]  // HAS_ERRNO: declare x86_fp80 @sqrtl(x86_fp80 noundef) [[NOT_READNONE]] @@ -722,20 +722,22 @@ sincos(f, d, d);       sincosf(f, fp, fp);        sincosl(f, l, l);    trunc(f);      truncf(f);     truncl(f); -// NO__ERRNO: declare double @llvm.trunc.f64(double) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare float @llvm.trunc.f32(float) [[READNONE_INTRINSIC]] -// NO__ERRNO: declare x86_fp80 @llvm.trunc.f80(x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double @llvm.trunc.f64(double) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare float @llvm.trunc.f32(float) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare x86_fp80 @llvm.trunc.f80(x86_fp80) [[READNONE_INTRINSIC]] +// NO__ERRNO: declare double @llvm.trunc.f64(double) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare float @llvm.trunc.f32(float) [[READNONE_INTRINSIC2]] +// NO__ERRNO: declare x86_fp80 @llvm.trunc.f80(x86_fp80) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare double @llvm.trunc.f64(double) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare float @llvm.trunc.f32(float) [[READNONE_INTRINSIC2]] +// HAS_ERRNO: declare x86_fp80 @llvm.trunc.f80(x86_fp80) [[READNONE_INTRINSIC2]]  };  // NO__ERRNO: attributes [[READNONE_INTRINSIC]] = { {{.*}}memory(none){{.*}} } +// NO__ERRNO: attributes [[READNONE_INTRINSIC2]] = { {{.*}}memory(none){{.*}} }  // NO__ERRNO: attributes [[NOT_READNONE]] = { nounwind {{.*}} }  // NO__ERRNO: attributes [[READNONE]] = { {{.*}}memory(none){{.*}} }  // NO__ERRNO: attributes [[READONLY]] = { {{.*}}memory(read){{.*}} }  // HAS_ERRNO: attributes [[NOT_READNONE]] = { nounwind {{.*}} } +// HAS_ERRNO: attributes [[READNONE_INTRINSIC2]] = { {{.*}}memory(none){{.*}} }  // HAS_ERRNO: attributes [[READNONE_INTRINSIC]] = { {{.*}}memory(none){{.*}} }  // HAS_ERRNO: attributes [[READONLY]] = { {{.*}}memory(read){{.*}} }  // HAS_ERRNO: attributes [[READNONE]] = { {{.*}}memory(none){{.*}} } diff --git a/clang/test/CodeGenOpenCL/cl20-device-side-enqueue-attributes.cl b/clang/test/CodeGenOpenCL/cl20-device-side-enqueue-attributes.cl index ea1f734..5cbf645 100644 --- a/clang/test/CodeGenOpenCL/cl20-device-side-enqueue-attributes.cl +++ b/clang/test/CodeGenOpenCL/cl20-device-side-enqueue-attributes.cl @@ -199,7 +199,7 @@ kernel void device_side_enqueue(global float 
*a, global float *b, int i) {
 // SPIR32: attributes #[[ATTR0]] = { convergent noinline norecurse nounwind optnone "denormal-fp-math-f32"="preserve-sign,preserve-sign" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "uniform-work-group-size"="true" }
 // SPIR32: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nounwind willreturn memory(argmem: readwrite) }
 // SPIR32: attributes #[[ATTR2]] = { convergent noinline nounwind optnone "denormal-fp-math-f32"="preserve-sign,preserve-sign" "no-trapping-math"="true" "stack-protector-buffer-size"="8" }
-// SPIR32: attributes #[[ATTR3:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+// SPIR32: attributes #[[ATTR3:[0-9]+]] = { nocallback nocreateundeforpoison nofree nosync nounwind speculatable willreturn memory(none) }
 // SPIR32: attributes #[[ATTR4]] = { convergent nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" "no-trapping-math"="true" "stack-protector-buffer-size"="8" }
 // SPIR32: attributes #[[ATTR5]] = { convergent nounwind "uniform-work-group-size"="true" }
 //.
diff --git a/clang/test/DebugInfo/KeyInstructions/flag.cpp b/clang/test/DebugInfo/KeyInstructions/flag.cpp
index 6aeeed6..4a4a5c4 100644
--- a/clang/test/DebugInfo/KeyInstructions/flag.cpp
+++ b/clang/test/DebugInfo/KeyInstructions/flag.cpp
@@ -8,6 +8,9 @@
 // KEY-INSTRUCTIONS: "-gkey-instructions"
 // NO-KEY-INSTRUCTIONS-NOT: key-instructions
+
+// Only expect one DWARF-related flag.
+// NO-DEBUG: -fdwarf2-cfi-asm
 // NO-DEBUG-NOT: debug-info-kind
 // NO-DEBUG-NOT: dwarf
diff --git a/clang/test/Frontend/diags-interesting-source-region-colors.cpp b/clang/test/Frontend/diags-interesting-source-region-colors.cpp
new file mode 100644
index 0000000..80db087
--- /dev/null
+++ b/clang/test/Frontend/diags-interesting-source-region-colors.cpp
@@ -0,0 +1,30 @@
+// RUN: not %clang_cc1 %s -fmessage-length=40 -fcolor-diagnostics -fno-show-source-location -Wunused-value -o - 2>&1 | FileCheck %s
+
+// REQUIRES: ansi-escape-sequences
+
+int main() {
+          1
+                                                        + if;
+  // CHECK: expected expression
+  // CHECK-NEXT: ...+ [[MAGENTA:.\[0;34m]]if[[RESET:.\[0m]];
+
+    /*😂*/1
+                                                        + if;
+  // CHECK: expected expression
+  // CHECK-NEXT: ...+ [[MAGENTA:.\[0;34m]]if[[RESET:.\[0m]];
+
+  a + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1;
+  // CHECK: use of undeclared identifier
+  // CHECK-NEXT: a + [[GREEN:.\[0;32m]]1[[RESET]] + [[GREEN]]1[[RESET]] + [[GREEN]]1[[RESET]] + [[GREEN]]1[[RESET]] + [[GREEN]]1[[RESET]] ...
+
+
+  /*😂😂😂*/ a + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1;
+  // CHECK: use of undeclared identifier
+  // CHECK-NEXT: [[YELLOW:.\[0;33m]]/*😂😂😂*/[[RESET]] a + [[GREEN:.\[0;32m]]1[[RESET]] + [[GREEN]]1[[RESET]] ...
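+  // Below, a whole string literal should be rendered as a single green token,
+  // with and without a leading multi-byte (emoji) character.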
+ +  "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"; +  // CHECK: [[GREEN:.\[0;32m]]"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"[[RESET]]; + +  "😂xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"; +  // CHECK: [[GREEN:.\[0;32m]]"😂xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"[[RESET]]; +} + + diff --git a/clang/test/Modules/Inputs/builtin-headers/system-modules.modulemap b/clang/test/Modules/Inputs/builtin-headers/system-modules.modulemap index 1869651..8ab6ae47 100644 --- a/clang/test/Modules/Inputs/builtin-headers/system-modules.modulemap +++ b/clang/test/Modules/Inputs/builtin-headers/system-modules.modulemap @@ -49,6 +49,11 @@ module cstd [system] [no_undeclared_includes] {      export *    } +  module stdckdint { +    header "stdckdint.h" +    export * +  } +    module stdcountof {      header "stdcountof.h"      export * diff --git a/clang/test/Modules/builtin-headers.mm b/clang/test/Modules/builtin-headers.mm index ad2d66a..6cd3662 100644 --- a/clang/test/Modules/builtin-headers.mm +++ b/clang/test/Modules/builtin-headers.mm @@ -17,6 +17,7 @@  @import _Builtin_stdarg;  @import _Builtin_stdatomic;  @import _Builtin_stdbool; +@import _Builtin_stdckdint;  @import _Builtin_stdcountof;  @import _Builtin_stddef;  @import _Builtin_stdint; diff --git a/clang/test/Modules/crash-enum-visibility-with-header-unit.cppm b/clang/test/Modules/crash-enum-visibility-with-header-unit.cppm new file mode 100644 index 0000000..90c5779 --- /dev/null +++ b/clang/test/Modules/crash-enum-visibility-with-header-unit.cppm @@ -0,0 +1,46 @@ +// Fixes #165445 + +// RUN: rm -rf %t +// RUN: mkdir -p %t +// RUN: split-file %s %t +// +// RUN: %clang_cc1 -std=c++20 -x c++-user-header %t/header.h \ +// RUN:   -emit-header-unit -o %t/header.pcm +// +// RUN: %clang_cc1 -std=c++20 %t/A.cppm -fmodule-file=%t/header.pcm \ +// RUN:   -emit-module-interface -o %t/A.pcm +//  +// RUN: %clang_cc1 -std=c++20 %t/B.cppm -fmodule-file=%t/header.pcm \ +// RUN:   -emit-module-interface -o %t/B.pcm +// +// RUN: %clang_cc1 -std=c++20 %t/use.cpp \ +// RUN:   -fmodule-file=A=%t/A.pcm -fmodule-file=B=%t/B.pcm  \ +// RUN:   -fmodule-file=%t/header.pcm \ +// RUN:   -verify -fsyntax-only + +//--- enum.h +enum E { Value }; + +//--- header.h +#include "enum.h" + +//--- A.cppm +module; +#include "enum.h" +export module A; + +auto e = Value; + +//--- B.cppm +export module B; +import "header.h"; + +auto e = Value; + +//--- use.cpp +// expected-no-diagnostics +import A; +import B; +#include "enum.h" + +auto e = Value; diff --git a/clang/test/Preprocessor/unwind-tables.c b/clang/test/Preprocessor/unwind-tables.c index 0a863d7..5ff990d 100644 --- a/clang/test/Preprocessor/unwind-tables.c +++ b/clang/test/Preprocessor/unwind-tables.c @@ -1,11 +1,13 @@  // RUN: %clang %s -dM -E -target x86_64-windows | FileCheck %s --check-prefix=NO  // RUN: %clang %s -dM -E -target x86_64 -fno-asynchronous-unwind-tables | FileCheck %s --check-prefix=NO +// RUN: %clang %s -dM -E -target x86_64 -fno-dwarf2-cfi-asm | FileCheck %s --check-prefix=NO  // RUN: %clang %s -dM -E -target x86_64 | FileCheck %s  // RUN: %clang %s -dM -E -target x86_64 -funwind-tables -fno-asynchronous-unwind-tables -g | FileCheck %s  // RUN: %clang %s -dM -E -target aarch64-apple-darwin | FileCheck %s  // RUN: %clang %s -dM -E -target x86_64 -fno-asynchronous-unwind-tables -g | FileCheck %s  // RUN: %clang %s -dM -E -target x86_64 -fno-asynchronous-unwind-tables -fexceptions | FileCheck %s +// RUN: %clang %s -dM -E -target x86_64-windows -fdwarf2-cfi-asm | FileCheck %s  // NO-NOT: #define 
__GCC_HAVE_DWARF2_CFI_ASM  // CHECK: #define __GCC_HAVE_DWARF2_CFI_ASM 1 diff --git a/clang/test/Sema/attr-nonblocking-constraints.cpp b/clang/test/Sema/attr-nonblocking-constraints.cpp index b26a945..881e816 100644 --- a/clang/test/Sema/attr-nonblocking-constraints.cpp +++ b/clang/test/Sema/attr-nonblocking-constraints.cpp @@ -235,16 +235,35 @@ void nb13() [[clang::nonblocking]] { nb12(); }  // C++ member function pointers  struct PTMFTester {  	typedef void (PTMFTester::*ConvertFunction)() [[clang::nonblocking]]; - -	void convert() [[clang::nonblocking]]; +	typedef void (PTMFTester::*BlockingFunction)();  	ConvertFunction mConvertFunc; -}; -void PTMFTester::convert() [[clang::nonblocking]] -{ -	(this->*mConvertFunc)(); -} +	void convert() [[clang::nonblocking]] +	{ +		(this->*mConvertFunc)(); // This should not generate a warning. +	} + +	template <typename T> +	struct Holder { +		T value; +		 +		T& operator*() { return value; } +	}; + + +	void ptmfInExpr(Holder<ConvertFunction>& holder) [[clang::nonblocking]] +	{ +		(this->*(*holder))();   // Should not generate a warning. +		((*this).*(*holder))(); // Should not generate a warning. +	} + +	void ptmfInExpr(Holder<BlockingFunction>& holder) [[clang::nonblocking]] +	{ +		(this->*(*holder))(); // expected-warning {{function with 'nonblocking' attribute must not call non-'nonblocking' expression}} +		((*this).*(*holder))(); // expected-warning {{function with 'nonblocking' attribute must not call non-'nonblocking' expression}} +	} +};  // Allow implicit conversion from array to pointer.  void nb14(unsigned idx) [[clang::nonblocking]] @@ -354,6 +373,33 @@ struct Unsafe {    Unsafe(float y) [[clang::nonblocking]] : Unsafe(int(y)) {} // expected-warning {{constructor with 'nonblocking' attribute must not call non-'nonblocking' constructor 'Unsafe::Unsafe'}}  }; +// Exercise cases of a temporary with a safe constructor and unsafe destructor. 
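+// The temporary's destructor runs at the end of the full-expression, so even a
+// construction via the safe X() constructor must be diagnosed; likewise, a
+// by-value parameter is destroyed when the lambda returns.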
+void nb23() +{ +	struct X { +		int *ptr = nullptr; +		X() {} +		~X() { delete ptr; } // expected-note 2 {{destructor cannot be inferred 'nonblocking' because it allocates or deallocates memory}} +	}; + +	auto inner = []() [[clang::nonblocking]] { +		X(); // expected-warning {{lambda with 'nonblocking' attribute must not call non-'nonblocking' destructor 'nb23()::X::~X'}} +	}; + +	auto inner2 = [](X x) [[clang::nonblocking]] { // expected-warning {{lambda with 'nonblocking' attribute must not call non-'nonblocking' destructor 'nb23()::X::~X'}} +	}; + +} + +struct S2 { ~S2(); }; // expected-note 2 {{declaration cannot be inferred 'nonblocking' because it has no definition in this translation unit}} +void nb24() { +    S2 s; +    [&]() [[clang::nonblocking]] { +        [s]{ auto x = &s; }(); // expected-warning {{lambda with 'nonblocking' attribute must not call non-'nonblocking' destructor}} expected-note {{destructor cannot be inferred 'nonblocking' because it calls non-'nonblocking' destructor 'S2::~S2'}} +        [=]{ auto x = &s; }(); // expected-warning {{lambda with 'nonblocking' attribute must not call non-'nonblocking' destructor}} expected-note {{destructor cannot be inferred 'nonblocking' because it calls non-'nonblocking' destructor 'S2::~S2'}} +    }(); +} +  struct DerivedFromUnsafe : public Unsafe {    DerivedFromUnsafe() [[clang::nonblocking]] {} // expected-warning {{constructor with 'nonblocking' attribute must not call non-'nonblocking' constructor 'Unsafe::Unsafe'}}    DerivedFromUnsafe(int x) [[clang::nonblocking]] : Unsafe(x) {} // expected-warning {{constructor with 'nonblocking' attribute must not call non-'nonblocking' constructor 'Unsafe::Unsafe'}} diff --git a/clang/test/Sema/labeled-break-continue.c b/clang/test/Sema/labeled-break-continue.c index 78f81c4..6b4adc2 100644 --- a/clang/test/Sema/labeled-break-continue.c +++ b/clang/test/Sema/labeled-break-continue.c @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -std=c2y -verify -fsyntax-only -fblocks %s -// RUN: %clang_cc1 -std=c23 -verify -fsyntax-only -fblocks -fnamed-loops %s -// RUN: %clang_cc1 -x c++ -verify -fsyntax-only -fblocks -fnamed-loops %s +// RUN: %clang_cc1 -std=c2y -verify -Wunused -fsyntax-only -fblocks %s +// RUN: %clang_cc1 -std=c23 -verify -Wunused -fsyntax-only -fblocks -fnamed-loops %s +// RUN: %clang_cc1 -x c++ -verify -Wunused -fsyntax-only -fblocks -fnamed-loops %s  void f1() {    l1: while (true) { @@ -159,3 +159,15 @@ void f7() {      continue d; // expected-error {{'continue' label does not name an enclosing loop}}    }  } + +void f8() { +  l1: // no-warning +  while (true) { +    break l1; +  } + +  l2: // no-warning +  while (true) { +    continue l2; +  } +} diff --git a/clang/test/SemaCXX/vector.cpp b/clang/test/SemaCXX/vector.cpp index 808bdb6..06195f0 100644 --- a/clang/test/SemaCXX/vector.cpp +++ b/clang/test/SemaCXX/vector.cpp @@ -786,3 +786,16 @@ const long long e = *0; // expected-error {{indirection requires pointer operand  double f = a - e;       // expected-error {{cannot initialize a variable of type 'double' with an rvalue of type '__attribute__((__vector_size__(1 * sizeof(double)))) double' (vector of 1 'double' value)}}  int h = c - e;          // expected-error {{cannot initialize a variable of type 'int' with an rvalue of type '__attribute__((__vector_size__(1 * sizeof(long)))) long' (vector of 1 'long' value)}}  } + +typedef int v_neg_size __attribute__((vector_size(-8))); // expected-error{{vector must have non-negative size}} +typedef int v_neg_size_2 
__attribute__((vector_size(-1 * 8))); // expected-error{{vector must have non-negative size}} +typedef int v_ext_neg_size __attribute__((ext_vector_type(-8))); // expected-error{{vector must have non-negative size}} +typedef int v_ext_neg_size2 __attribute__((ext_vector_type(-1 * 8))); // expected-error{{vector must have non-negative size}} + + +#if __cplusplus >= 201103L + +template <int N> using templated_v_size = int __attribute__((vector_size(N))); // expected-error{{vector must have non-negative size}} +templated_v_size<-8> templated_v_neg_size; // expected-note{{in instantiation of template type alias 'templated_v_size' requested here}} + +#endif diff --git a/clang/test/SemaTemplate/ctad.cpp b/clang/test/SemaTemplate/ctad.cpp index 1a575ea..60603f0 100644 --- a/clang/test/SemaTemplate/ctad.cpp +++ b/clang/test/SemaTemplate/ctad.cpp @@ -104,3 +104,15 @@ namespace ConvertDeducedTemplateArgument {    auto x = C(D<A::B>());  } + +namespace pr165560 { +template <class T, class> struct S { +  using A = T; +  template <class> struct I { // expected-note{{candidate function template not viable: requires 1 argument, but 0 were provided}} \ +                              // expected-note{{implicit deduction guide declared as 'template <class> I(pr165560::S<int, int>::I<type-parameter-0-0>) -> pr165560::S<int, int>::I<type-parameter-0-0>'}} +    I(typename A::F) {} // expected-error{{type 'A' (aka 'int') cannot be used prior to '::' because it has no members}} +  }; +}; +S<int, int>::I i; // expected-error{{no viable constructor or deduction guide for deduction of template arguments of 'S<int, int>::I'}} \ +                  // expected-note{{while building implicit deduction guide first needed here}} +}
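
The attr-nonblocking-constraints.cpp changes above all exercise one rule: from a 'nonblocking' context, a call through a pointer to member function is accepted only when the pointee function type itself carries the attribute. A minimal standalone sketch of that rule — names here are invented for illustration, not taken from the patch:

// Illustrative only; 'Worker', 'SafeFn', 'PlainFn' are hypothetical names.
struct Worker {
  // Pointee type carries the attribute: calls through 'safe' are accepted
  // inside a nonblocking context.
  typedef void (Worker::*SafeFn)() [[clang::nonblocking]];
  // Pointee type lacks the attribute: calls through 'plain' are diagnosed.
  typedef void (Worker::*PlainFn)();

  SafeFn safe;
  PlainFn plain;

  void run() [[clang::nonblocking]] {
    (this->*safe)();  // OK: callee's function type is 'nonblocking'
    (this->*plain)(); // warning: non-'nonblocking' call expression
  }
};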

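The nb23/nb24 additions cover the subtler half of the same analysis: destructors run implicitly, so a temporary or a by-copy lambda capture can introduce a non-'nonblocking' call even though no function is named at the call site. A hedged sketch of the capture case, again with hypothetical names — a destructor with no definition in the translation unit cannot be inferred 'nonblocking':

// Illustrative only; 'Resource' and 'sketch' are invented for the example.
struct Resource { ~Resource(); }; // defined elsewhere, so not inferrable

void sketch() {
  Resource r;
  [&]() [[clang::nonblocking]] {
    // The inner closure copy-captures 'r', so destroying the closure
    // temporary at the end of the call expression runs ~Resource(),
    // which is diagnosed because it cannot be proven nonblocking.
    [r] { (void)&r; }();
  }();
}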