author     Robin Dapp <rdapp@ventanamicro.com>   2023-06-27 16:22:55 +0200
committer  Robin Dapp <rdapp@ventanamicro.com>   2023-07-05 16:56:46 +0200
commit     df9a6cbb087d674ccee545d642e688f1979dcb3a
tree       7cf2f86bf8e56c38fe3a25da33c703185cd9c064
parent     70b041684a2222b8f19200cc240a13d703b210a7
RISC-V: Allow variable index for vec_set.
This patch enables a variable index for vec_set and adjusts the tests accordingly.
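For illustration, this is the kind of variable-index insertion the change enables; a minimal sketch modeled on the VEC_SET_VAR* macros added to the tests below (the vnx4si typedef follows the vls-vlmax test files; the test flags/dg-options are not shown here):

  #include <stdint.h>

  typedef int32_t vnx4si __attribute__ ((vector_size (16)));

  /* Insert s at a run-time index.  Before this patch the vec_set
     expander only accepted an immediate index; with a register index
     it now emits (roughly) a vsetvli with VL = idx + 1 and a
     tail-undisturbed policy, a vmv.v.x/vfmv.v.f broadcast and a
     vslideup.vx, as the adjusted scan-assembler checks reflect.  */
  __attribute__ ((noipa))
  vnx4si
  vec_set_var_vnx4si (vnx4si v, int32_t idx, int32_t s)
  {
    v[idx] = s;
    return v;
  }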
gcc/ChangeLog:
* config/riscv/autovec.md: Allow register index operand.
gcc/testsuite/ChangeLog:
* gcc.target/riscv/rvv/autovec/vls-vlmax/vec_set-1.c: Adjust
test.
* gcc.target/riscv/rvv/autovec/vls-vlmax/vec_set-2.c: Ditto.
* gcc.target/riscv/rvv/autovec/vls-vlmax/vec_set-3.c: Ditto.
* gcc.target/riscv/rvv/autovec/vls-vlmax/vec_set-4.c: Ditto.
* gcc.target/riscv/rvv/autovec/vls-vlmax/vec_set-run.c: Ditto.
* gcc.target/riscv/rvv/autovec/vls-vlmax/vec_set-zvfh-run.c:
Ditto.
7 files changed, 185 insertions, 164 deletions
diff --git a/gcc/config/riscv/autovec.md b/gcc/config/riscv/autovec.md
index 0fc2bf5..7b457a5 100644
--- a/gcc/config/riscv/autovec.md
+++ b/gcc/config/riscv/autovec.md
@@ -1022,7 +1022,7 @@ (define_expand "vec_set<mode>"
   [(match_operand:V 0 "register_operand")
    (match_operand:<VEL> 1 "register_operand")
-   (match_operand 2 "immediate_operand")]
+   (match_operand 2 "nonmemory_operand")]
   "TARGET_VECTOR"
 {
   /* If we set the first element, emit an v(f)mv.s.[xf].  */
@@ -1039,12 +1039,17 @@
 	 it at the proper position using vslideup with an
 	 "effective length" of 1 i.e. a VL 1 past the offset.  */
 
-      /* Slide offset = element index.  */
-      int offset = INTVAL (operands[2]);
-
-      /* Only insert one element, i.e. VL = offset + 1.  */
+      /* Here we set VL = offset + 1.  */
       rtx length = gen_reg_rtx (Pmode);
-      emit_move_insn (length, GEN_INT (offset + 1));
+      operands[2] = gen_lowpart (Pmode, operands[2]);
+      if (CONST_INT_P (operands[2]))
+	emit_move_insn (length, GEN_INT (INTVAL (operands[2]) + 1));
+      else
+	{
+	  rtx add = gen_rtx_PLUS (GET_MODE (operands[2]),
+				  operands[2], GEN_INT (1));
+	  emit_move_insn (length, add);
+	}
 
       /* Move operands[1] into a vector register via vmv.v.x using the same
 	 VL we need for the slide.  */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/vec_set-1.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/vec_set-1.c
index 3d60e63..e97f6f5 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/vec_set-1.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/vec_set-1.c
@@ -20,6 +20,15 @@ typedef double vnx2df __attribute__((vector_size (16)));
     return v; \
   }
 
+#define VEC_SET_VAR1(S,V) \
+  V \
+  __attribute__((noipa)) \
+  vec_set_var_##V (V v, int8_t idx, S s) \
+  { \
+    v[idx] = s; \
+    return v; \
+  } \
+
 #define TEST_ALL1(T) \
   T (_Float16, vnx8hf, 0) \
   T (_Float16, vnx8hf, 3) \
@@ -43,20 +52,31 @@ typedef double vnx2df __attribute__((vector_size (16)));
   T (int8_t, vnx16qi, 11) \
   T (int8_t, vnx16qi, 15) \
 
+#define TEST_ALL_VAR1(T) \
+  T (_Float16, vnx8hf) \
+  T (float, vnx4sf) \
+  T (double, vnx2df) \
+  T (int64_t, vnx2di) \
+  T (int32_t, vnx4si) \
+  T (int16_t, vnx8hi) \
+  T (int8_t, vnx16qi) \
+
 TEST_ALL1 (VEC_SET)
+TEST_ALL_VAR1 (VEC_SET_VAR1)
 
 /* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e8,\s*m1,\s*ta,\s*ma} 1 } } */
-/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e8,\s*m1,\s*tu,\s*ma} 4 } } */
+/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e8,\s*m1,\s*tu,\s*ma} 5 } } */
 /* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e16,\s*m1,\s*ta,\s*ma} 2 } } */
-/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e16,\s*m1,\s*tu,\s*ma} 4 } } */
+/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e16,\s*m1,\s*tu,\s*ma} 6 } } */
 /* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e32,\s*m1,\s*ta,\s*ma} 2 } } */
-/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e32,\s*m1,\s*tu,\s*ma} 4 } } */
+/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e32,\s*m1,\s*tu,\s*ma} 6 } } */
 /* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e64,\s*m1,\s*ta,\s*ma} 2 } } */
-/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e64,\s*m1,\s*tu,\s*ma} 2 } } */
+/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e64,\s*m1,\s*tu,\s*ma} 4 } } */
 
-/* { dg-final { scan-assembler-times {\tvmv.v.x} 9 } } */
-/* { dg-final { scan-assembler-times {\tvfmv.v.f} 5 } } */
+/* { dg-final { scan-assembler-times {\tvmv.v.x} 13 } } */
+/* { dg-final { scan-assembler-times {\tvfmv.v.f} 8 } } */
 /* { dg-final { scan-assembler-times {\tvslideup.vi} 14 } } */
+/* { dg-final { scan-assembler-times {\tvslideup.vx} 7 } } */
 /* { dg-final { scan-assembler-times {\tvfmv.s.f} 3 } } */
 /* { dg-final { scan-assembler-times {\tvmv.s.x} 4 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/vec_set-2.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/vec_set-2.c
index 6929c17..6d077d6 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/vec_set-2.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/vec_set-2.c
@@ -20,6 +20,15 @@ typedef double vnx4df __attribute__((vector_size (32)));
     return v; \
   }
 
+#define VEC_SET_VAR2(S,V) \
+  V \
+  __attribute__((noipa)) \
+  vec_set_var_##V (V v, int16_t idx, S s) \
+  { \
+    v[idx] = s; \
+    return v; \
+  } \
+
 #define TEST_ALL2(T) \
   T (_Float16, vnx16hf, 0) \
   T (_Float16, vnx16hf, 3) \
@@ -55,20 +64,31 @@ typedef double vnx4df __attribute__((vector_size (32)));
   T (int8_t, vnx32qi, 16) \
   T (int8_t, vnx32qi, 31) \
 
+#define TEST_ALL_VAR2(T) \
+  T (_Float16, vnx16hf) \
+  T (float, vnx8sf) \
+  T (double, vnx4df) \
+  T (int64_t, vnx4di) \
+  T (int32_t, vnx8si) \
+  T (int16_t, vnx16hi) \
+  T (int8_t, vnx32qi) \
+
 TEST_ALL2 (VEC_SET)
+TEST_ALL_VAR2 (VEC_SET_VAR2)
 
 /* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e8,\s*m2,\s*ta,\s*ma} 1 } } */
-/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e8,\s*m2,\s*tu,\s*ma} 4 } } */
+/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e8,\s*m2,\s*tu,\s*ma} 5 } } */
 /* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e16,\s*m2,\s*ta,\s*ma} 2 } } */
-/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e16,\s*m2,\s*tu,\s*ma} 8 } } */
+/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e16,\s*m2,\s*tu,\s*ma} 10 } } */
 /* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e32,\s*m2,\s*ta,\s*ma} 2 } } */
-/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e32,\s*m2,\s*tu,\s*ma} 8 } } */
+/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e32,\s*m2,\s*tu,\s*ma} 10 } } */
 /* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e64,\s*m2,\s*ta,\s*ma} 2 } } */
-/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e64,\s*m2,\s*tu,\s*ma} 6 } } */
+/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e64,\s*m2,\s*tu,\s*ma} 8 } } */
 
-/* { dg-final { scan-assembler-times {\tvmv.v.x} 15 } } */
-/* { dg-final { scan-assembler-times {\tvfmv.v.f} 11 } } */
+/* { dg-final { scan-assembler-times {\tvmv.v.x} 19 } } */
+/* { dg-final { scan-assembler-times {\tvfmv.v.f} 14 } } */
 /* { dg-final { scan-assembler-times {\tvslideup.vi} 26 } } */
+/* { dg-final { scan-assembler-times {\tvslideup.vx} 7 } } */
 /* { dg-final { scan-assembler-times {\tvfmv.s.f} 3 } } */
 /* { dg-final { scan-assembler-times {\tvmv.s.x} 4 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/vec_set-3.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/vec_set-3.c
index 903deae..5f6cb32 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/vec_set-3.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/vec_set-3.c
@@ -20,6 +20,15 @@ typedef double vnx8df __attribute__((vector_size (64)));
     return v; \
   }
 
+#define VEC_SET_VAR3(S,V) \
+  V \
+  __attribute__((noipa)) \
+  vec_set_var_##V (V v, int32_t idx, S s) \
+  { \
+    v[idx] = s; \
+    return v; \
+  } \
+
 #define TEST_ALL3(T) \
   T (_Float16, vnx32hf, 0) \
   T (_Float16, vnx32hf, 3) \
@@ -56,21 +65,31 @@ typedef double vnx8df __attribute__((vector_size (64)));
   T (int8_t, vnx64qi, 32) \
   T (int8_t, vnx64qi, 63) \
 
+#define TEST_ALL_VAR3(T) \
+  T (_Float16, vnx32hf) \
+  T (float, vnx16sf) \
+  T (double, vnx8df) \
+  T (int64_t, vnx8di) \
+  T (int32_t, vnx16si) \
+  T (int16_t, vnx32hi) \
+  T (int8_t, vnx64qi) \
+
 TEST_ALL3 (VEC_SET)
+TEST_ALL_VAR3 (VEC_SET_VAR3)
 
 /* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e8,\s*m4,\s*ta,\s*ma} 1 } } */
-/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e8,\s*m4,\s*tu,\s*ma} 4 } } */
+/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e8,\s*m4,\s*tu,\s*ma} 5 } } */
 /* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e16,\s*m4,\s*ta,\s*ma} 2 } } */
-/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e16,\s*m4,\s*tu,\s*ma} 9 } } */
+/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e16,\s*m4,\s*tu,\s*ma} 11 } } */
 /* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e32,\s*m4,\s*ta,\s*ma} 2 } } */
-/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e32,\s*m4,\s*tu,\s*ma} 8 } } */
+/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e32,\s*m4,\s*tu,\s*ma} 10 } } */
 /* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e64,\s*m4,\s*ta,\s*ma} 2 } } */
-/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e64,\s*m4,\s*tu,\s*ma} 6 } } */
+/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e64,\s*m4,\s*tu,\s*ma} 8 } } */
 
-/* { dg-final { scan-assembler-times {\tvmv.v.x} 15 } } */
-/* { dg-final { scan-assembler-times {\tvfmv.v.f} 12 } } */
+/* { dg-final { scan-assembler-times {\tvmv.v.x} 19 } } */
+/* { dg-final { scan-assembler-times {\tvfmv.v.f} 15 } } */
 /* { dg-final { scan-assembler-times {\tvslideup.vi} 25 } } */
-/* { dg-final { scan-assembler-times {\tvslideup.vx} 2 } } */
+/* { dg-final { scan-assembler-times {\tvslideup.vx} 9 } } */
 /* { dg-final { scan-assembler-times {\tvfmv.s.f} 3 } } */
 /* { dg-final { scan-assembler-times {\tvmv.s.x} 4 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/vec_set-4.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/vec_set-4.c
index 7d73399..c6f6964 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/vec_set-4.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/vec_set-4.c
@@ -20,6 +20,15 @@ typedef double vnx16df __attribute__((vector_size (128)));
     return v; \
   }
 
+#define VEC_SET_VAR4(S,V) \
+  V \
+  __attribute__((noipa)) \
+  vec_set_var_##V (V v, int64_t idx, S s) \
+  { \
+    v[idx] = s; \
+    return v; \
+  } \
+
 #define TEST_ALL4(T) \
   T (_Float16, vnx64hf, 0) \
   T (_Float16, vnx64hf, 3) \
@@ -59,21 +68,31 @@ typedef double vnx16df __attribute__((vector_size (128)));
   T (int8_t, vnx128qi, 64) \
   T (int8_t, vnx128qi, 127) \
 
+#define TEST_ALL_VAR4(T) \
+  T (_Float16, vnx64hf) \
+  T (float, vnx32sf) \
+  T (double, vnx16df) \
+  T (int64_t, vnx16di) \
+  T (int32_t, vnx32si) \
+  T (int16_t, vnx64hi) \
+  T (int8_t, vnx128qi) \
+
 TEST_ALL4 (VEC_SET)
+TEST_ALL_VAR4 (VEC_SET_VAR4)
 
 /* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e8,\s*m8,\s*ta,\s*ma} 1 } } */
-/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e8,\s*m8,\s*tu,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e8,\s*m8,\s*tu,\s*ma} 6 } } */
 /* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e16,\s*m8,\s*ta,\s*ma} 2 } } */
-/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e16,\s*m8,\s*tu,\s*ma} 11 } } */
+/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e16,\s*m8,\s*tu,\s*ma} 13 } } */
 /* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e32,\s*m8,\s*ta,\s*ma} 2 } } */
-/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e32,\s*m8,\s*tu,\s*ma} 8 } } */
+/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e32,\s*m8,\s*tu,\s*ma} 10 } } */
 /* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e64,\s*m8,\s*ta,\s*ma} 2 } } */
-/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e64,\s*m8,\s*tu,\s*ma} 6 } } */
+/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e64,\s*m8,\s*tu,\s*ma} 8 } } */
 
-/* { dg-final { scan-assembler-times {\tvmv.v.x} 16 } } */
-/* { dg-final { scan-assembler-times {\tvfmv.v.f} 14 } } */
+/* { dg-final { scan-assembler-times {\tvmv.v.x} 20 } } */
+/* { dg-final { scan-assembler-times {\tvfmv.v.f} 17 } } */
 /* { dg-final { scan-assembler-times {\tvslideup.vi} 23 } } */
-/* { dg-final { scan-assembler-times {\tvslideup.vx} 7 } } */
+/* { dg-final { scan-assembler-times {\tvslideup.vx} 14 } } */
 /* { dg-final { scan-assembler-times {\tvfmv.s.f} 3 } } */
 /* { dg-final { scan-assembler-times {\tvmv.s.x} 4 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/vec_set-run.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/vec_set-run.c
index 6a08f26..44a0fd6 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/vec_set-run.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/vec_set-run.c
@@ -19,115 +19,24 @@ void check_##V##_##IDX () \
     assert (res[i] == (i == IDX ? 77 : i)); \
   }
 
-#define CHECK_ALL(T) \
-  T (float, vnx4sf, 0) \
-  T (float, vnx4sf, 1) \
-  T (float, vnx4sf, 3) \
-  T (double, vnx2df, 0) \
-  T (double, vnx2df, 1) \
-  T (int64_t, vnx2di, 0) \
-  T (int64_t, vnx2di, 1) \
-  T (int32_t, vnx4si, 0) \
-  T (int32_t, vnx4si, 1) \
-  T (int32_t, vnx4si, 3) \
-  T (int16_t, vnx8hi, 0) \
-  T (int16_t, vnx8hi, 2) \
-  T (int16_t, vnx8hi, 6) \
-  T (int8_t, vnx16qi, 0) \
-  T (int8_t, vnx16qi, 1) \
-  T (int8_t, vnx16qi, 7) \
-  T (int8_t, vnx16qi, 11) \
-  T (int8_t, vnx16qi, 15) \
-  T (float, vnx8sf, 0) \
-  T (float, vnx8sf, 1) \
-  T (float, vnx8sf, 3) \
-  T (float, vnx8sf, 4) \
-  T (float, vnx8sf, 7) \
-  T (double, vnx4df, 0) \
-  T (double, vnx4df, 1) \
-  T (double, vnx4df, 2) \
-  T (double, vnx4df, 3) \
-  T (int64_t, vnx4di, 0) \
-  T (int64_t, vnx4di, 1) \
-  T (int64_t, vnx4di, 2) \
-  T (int64_t, vnx4di, 3) \
-  T (int32_t, vnx8si, 0) \
-  T (int32_t, vnx8si, 1) \
-  T (int32_t, vnx8si, 3) \
-  T (int32_t, vnx8si, 4) \
-  T (int32_t, vnx8si, 7) \
-  T (int16_t, vnx16hi, 0) \
-  T (int16_t, vnx16hi, 1) \
-  T (int16_t, vnx16hi, 7) \
-  T (int16_t, vnx16hi, 8) \
-  T (int16_t, vnx16hi, 15) \
-  T (int8_t, vnx32qi, 0) \
-  T (int8_t, vnx32qi, 1) \
-  T (int8_t, vnx32qi, 15) \
-  T (int8_t, vnx32qi, 16) \
-  T (int8_t, vnx32qi, 31) \
-  T (float, vnx16sf, 0) \
-  T (float, vnx16sf, 2) \
-  T (float, vnx16sf, 6) \
-  T (float, vnx16sf, 8) \
-  T (float, vnx16sf, 14) \
-  T (double, vnx8df, 0) \
-  T (double, vnx8df, 2) \
-  T (double, vnx8df, 4) \
-  T (double, vnx8df, 6) \
-  T (int64_t, vnx8di, 0) \
-  T (int64_t, vnx8di, 2) \
-  T (int64_t, vnx8di, 4) \
-  T (int64_t, vnx8di, 6) \
-  T (int32_t, vnx16si, 0) \
-  T (int32_t, vnx16si, 2) \
-  T (int32_t, vnx16si, 6) \
-  T (int32_t, vnx16si, 8) \
-  T (int32_t, vnx16si, 14) \
-  T (int16_t, vnx32hi, 0) \
-  T (int16_t, vnx32hi, 2) \
-  T (int16_t, vnx32hi, 14) \
-  T (int16_t, vnx32hi, 16) \
-  T (int16_t, vnx32hi, 30) \
-  T (int8_t, vnx64qi, 0) \
-  T (int8_t, vnx64qi, 2) \
-  T (int8_t, vnx64qi, 30) \
-  T (int8_t, vnx64qi, 32) \
-  T (int8_t, vnx64qi, 63) \
-  T (float, vnx32sf, 0) \
-  T (float, vnx32sf, 3) \
-  T (float, vnx32sf, 12) \
-  T (float, vnx32sf, 17) \
-  T (float, vnx32sf, 14) \
-  T (double, vnx16df, 0) \
-  T (double, vnx16df, 4) \
-  T (double, vnx16df, 8) \
-  T (double, vnx16df, 12) \
-  T (int64_t, vnx16di, 0) \
-  T (int64_t, vnx16di, 4) \
-  T (int64_t, vnx16di, 8) \
-  T (int64_t, vnx16di, 12) \
-  T (int32_t, vnx32si, 0) \
-  T (int32_t, vnx32si, 4) \
-  T (int32_t, vnx32si, 12) \
-  T (int32_t, vnx32si, 16) \
-  T (int32_t, vnx32si, 28) \
-  T (int16_t, vnx64hi, 0) \
-  T (int16_t, vnx64hi, 4) \
-  T (int16_t, vnx64hi, 28) \
-  T (int16_t, vnx64hi, 32) \
-  T (int16_t, vnx64hi, 60) \
-  T (int8_t, vnx128qi, 0) \
-  T (int8_t, vnx128qi, 4) \
-  T (int8_t, vnx128qi, 30) \
-  T (int8_t, vnx128qi, 60) \
-  T (int8_t, vnx128qi, 64) \
-  T (int8_t, vnx128qi, 127) \
-
-CHECK_ALL (CHECK)
+#define CHECK_VAR(S, V) \
+__attribute__ ((noipa)) \
+void check_var_##V (int32_t idx) \
+  { \
+    V v; \
+    for (int i = 0; i < sizeof (V) / sizeof (S); i++) \
+      v[i] = i; \
+    V res = vec_set_var_##V (v, idx, 77); \
+    for (int i = 0; i < sizeof (V) / sizeof (S); i++) \
+      assert (res[i] == (i == idx ? 77 : i)); \
+  }
 
 #define RUN(S, V, IDX) \
-  check_##V##_##IDX ();
+  check_##V##_##IDX (); \
+
+#define RUN_VAR(S, V) \
+  for (int i = 0; i < sizeof (V) / sizeof (S); i++) \
+    check_var_##V (i); \
 
 #define RUN_ALL(T) \
   T (float, vnx4sf, 0) \
@@ -234,7 +143,37 @@ CHECK_ALL (CHECK)
   T (int8_t, vnx128qi, 64) \
   T (int8_t, vnx128qi, 127) \
 
+#define RUN_ALL_VAR(T) \
+  T (float, vnx4sf) \
+  T (double, vnx2df) \
+  T (int64_t, vnx2di) \
+  T (int32_t, vnx4si) \
+  T (int16_t, vnx8hi) \
+  T (int8_t, vnx16qi) \
+  T (float, vnx8sf) \
+  T (double, vnx4df) \
+  T (int64_t, vnx4di) \
+  T (int32_t, vnx8si) \
+  T (int16_t, vnx16hi) \
+  T (int8_t, vnx32qi) \
+  T (float, vnx16sf) \
+  T (double, vnx8df) \
+  T (int64_t, vnx8di) \
+  T (int32_t, vnx16si) \
+  T (int16_t, vnx32hi) \
+  T (int8_t, vnx64qi) \
+  T (float, vnx32sf) \
+  T (double, vnx16df) \
+  T (int64_t, vnx16di) \
+  T (int32_t, vnx32si) \
+  T (int16_t, vnx64hi) \
+  T (int8_t, vnx128qi) \
+
+RUN_ALL (CHECK)
+RUN_ALL_VAR (CHECK_VAR)
+
 int main ()
 {
   RUN_ALL (RUN);
+  RUN_ALL_VAR (RUN_VAR);
 }
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/vec_set-zvfh-run.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/vec_set-zvfh-run.c
index df8363e..7e5a73a 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/vec_set-zvfh-run.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/vec_set-zvfh-run.c
@@ -19,35 +19,24 @@ void check_##V##_##IDX () \
     assert (res[i] == (i == IDX ? 77 : i)); \
   }
 
-#define CHECK_ALL(T) \
-  T (_Float16, vnx8hf, 0) \
-  T (_Float16, vnx8hf, 3) \
-  T (_Float16, vnx8hf, 7) \
-  T (_Float16, vnx16hf, 0) \
-  T (_Float16, vnx16hf, 3) \
-  T (_Float16, vnx16hf, 7) \
-  T (_Float16, vnx16hf, 8) \
-  T (_Float16, vnx16hf, 15) \
-  T (_Float16, vnx32hf, 0) \
-  T (_Float16, vnx32hf, 3) \
-  T (_Float16, vnx32hf, 7) \
-  T (_Float16, vnx32hf, 8) \
-  T (_Float16, vnx32hf, 16) \
-  T (_Float16, vnx32hf, 31) \
-  T (_Float16, vnx64hf, 0) \
-  T (_Float16, vnx64hf, 3) \
-  T (_Float16, vnx64hf, 7) \
-  T (_Float16, vnx64hf, 8) \
-  T (_Float16, vnx64hf, 16) \
-  T (_Float16, vnx64hf, 31) \
-  T (_Float16, vnx64hf, 42) \
-  T (_Float16, vnx64hf, 63) \
-
-CHECK_ALL (CHECK)
+#define CHECK_VAR(S, V) \
+void check_var_##V (int32_t idx) \
+  { \
+    V v; \
+    for (int i = 0; i < sizeof (V) / sizeof (S); i++) \
+      v[i] = i; \
+    V res = vec_set_var_##V (v, idx, 77); \
+    for (int i = 0; i < sizeof (V) / sizeof (S); i++) \
+      assert (res[i] == (i == idx ? 77 : i)); \
+  }
 
 #define RUN(S, V, IDX) \
   check_##V##_##IDX ();
 
+#define RUN_VAR(S, V) \
+  for (int i = 0; i < sizeof (V) / sizeof (S); i++) \
+    check_var_##V (i); \
+
 #define RUN_ALL(T) \
   T (_Float16, vnx8hf, 0) \
   T (_Float16, vnx8hf, 3) \
@@ -72,7 +61,17 @@ CHECK_ALL (CHECK)
   T (_Float16, vnx64hf, 42) \
   T (_Float16, vnx64hf, 63) \
 
+#define RUN_ALL_VAR(T) \
+  T (_Float16, vnx8hf) \
+  T (_Float16, vnx16hf) \
+  T (_Float16, vnx32hf) \
+  T (_Float16, vnx64hf) \
+
+RUN_ALL (CHECK)
+RUN_ALL_VAR (CHECK_VAR)
+
 int main ()
 {
   RUN_ALL (RUN);
+  RUN_ALL_VAR (RUN_VAR);
 }