author     Juzhe-Zhong <juzhe.zhong@rivai.ai>   2023-06-20 17:00:31 +0800
committer  Pan Li <pan2.li@intel.com>           2023-06-20 21:59:22 +0800
commit     1c0b118babcd56dc886976b81727a9a77fc311c3 (patch)
tree       0673a73e19d0a1a13106373a2851b0242b574250 /gcc
parent     b26f1735cb8dcf690e9bd25f27d9f35002f3a291 (diff)
RISC-V: Optimize codegen of VLA SLP
Add comments for Robin:

We want to create a pattern where value[ix] = floor (ix / NPATTERNS)
* NPATTERNS.  As NPATTERNS is always a power of two, we can rewrite
this as value[ix] = ix & -NPATTERNS.

Recently, I figured out a better approach to the codegen for VLA
stepped vectors.  Here is a detailed description:

Case 1:
void f (uint8_t *restrict a, uint8_t *restrict b)
{
  for (int i = 0; i < 100; ++i)
    {
      a[i * 8] = b[i * 8 + 37] + 1;
      a[i * 8 + 1] = b[i * 8 + 37] + 2;
      a[i * 8 + 2] = b[i * 8 + 37] + 3;
      a[i * 8 + 3] = b[i * 8 + 37] + 4;
      a[i * 8 + 4] = b[i * 8 + 37] + 5;
      a[i * 8 + 5] = b[i * 8 + 37] + 6;
      a[i * 8 + 6] = b[i * 8 + 37] + 7;
      a[i * 8 + 7] = b[i * 8 + 37] + 8;
    }
}

We need to generate the stepped vector (NPATTERNS = 8):
{ 0, 0, 0, 0, 0, 0, 0, 0, 8, 8, 8, 8, 8, 8, 8, 8 }

Before this patch:
vid.v   v4                    ;; {0,1,2,3,4,5,6,7,8,...}
vsrl.vi v4,v4,3               ;; {0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,...}
li      a3,8                  ;; {8}
vmul.vx v4,v4,a3              ;; {0,0,0,0,0,0,0,0,8,8,8,8,8,8,8,8,...}

After this patch:
vid.v   v4                    ;; {0,1,2,3,4,5,6,7,8,...}
vand.vi v4,v4,-8 (-NPATTERNS) ;; {0,0,0,0,0,0,0,0,8,8,8,8,8,8,8,8,...}

Case 2:
void f (uint8_t *restrict a, uint8_t *restrict b)
{
  for (int i = 0; i < 100; ++i)
    {
      a[i * 8] = b[i * 8 + 3] + 1;
      a[i * 8 + 1] = b[i * 8 + 2] + 2;
      a[i * 8 + 2] = b[i * 8 + 1] + 3;
      a[i * 8 + 3] = b[i * 8 + 0] + 4;
      a[i * 8 + 4] = b[i * 8 + 7] + 5;
      a[i * 8 + 5] = b[i * 8 + 6] + 6;
      a[i * 8 + 6] = b[i * 8 + 5] + 7;
      a[i * 8 + 7] = b[i * 8 + 4] + 8;
    }
}

We need to generate the stepped vector (NPATTERNS = 4):
{ 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12, ... }

Before this patch:
li      a6,134221824
slli    a6,a6,5
addi    a6,a6,3  ;; 64-bit: 0x0003000200010000
vmv.v.x v6,a6    ;; {3, 2, 1, 0, ...}
vid.v   v4       ;; {0, 1, 2, 3, 4, 5, 6, 7, ...}
vsrl.vi v4,v4,2  ;; {0, 0, 0, 0, 1, 1, 1, 1, ...}
li      a3,4     ;; {4}
vmul.vx v4,v4,a3 ;; {0, 0, 0, 0, 4, 4, 4, 4, ...}
vadd.vv v4,v4,v6 ;; {3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12, ...}

After this patch:
li      a3,-536875008
slli    a3,a3,4
addi    a3,a3,1
slli    a3,a3,16
vmv.v.x v2,a3    ;; {3, 1, -1, -3, ...}
vid.v   v4       ;; {0, 1, 2, 3, 4, 5, 6, 7, ...}
vadd.vv v4,v4,v2 ;; {3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12, ...}

gcc/ChangeLog:

	* config/riscv/riscv-v.cc (expand_const_vector): Optimize codegen.

gcc/testsuite/ChangeLog:

	* gcc.target/riscv/rvv/autovec/partial/slp-1.c: Adapt testcase.
	* gcc.target/riscv/rvv/autovec/partial/slp-16.c: New test.
	* gcc.target/riscv/rvv/autovec/partial/slp_run-16.c: New test.
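The arithmetic behind both cases can be checked on the host.  Below is a
minimal standalone C sketch (an editorial addition, not part of the
patch): it verifies that ix & -NPATTERNS equals
floor (ix / NPATTERNS) * NPATTERNS for the power-of-two NPATTERNS of
Case 1, and that adding the repeating difference vector { 3, 1, -1, -3 }
to vid reproduces the Case 2 sequence.

#include <assert.h>

#define NPATTERNS 8   /* Case 1; must be a power of two.  */

int
main (void)
{
  /* Case 1: AND with -NPATTERNS rounds each index down to a multiple
     of NPATTERNS, giving { 0,...,0, 8,...,8, 16,... }.  */
  for (int ix = 0; ix < 64; ix++)
    assert ((ix & -NPATTERNS) == (ix / NPATTERNS) * NPATTERNS);

  /* Case 2 (NPATTERNS = 4): diff[i] = target[i] - vid[i] over the
     first pattern is { 3, 1, -1, -3 }; repeating it and adding vid
     yields { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, ... }.  */
  static const int diff[4] = { 3, 1, -1, -3 };
  for (int ix = 0; ix < 64; ix++)
    assert (ix + diff[ix % 4] == (ix / 4) * 4 + (3 - ix % 4));

  return 0;
}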
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/config/riscv/riscv-v.cc                                      |  81
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/partial/slp-1.c       |   2
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/partial/slp-16.c      |  24
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/partial/slp_run-16.c  |  66
4 files changed, 128 insertions, 45 deletions
diff --git a/gcc/config/riscv/riscv-v.cc b/gcc/config/riscv/riscv-v.cc
index 79c0337..839a2c6 100644
--- a/gcc/config/riscv/riscv-v.cc
+++ b/gcc/config/riscv/riscv-v.cc
@@ -1128,7 +1128,7 @@ expand_const_vector (rtx target, rtx src)
builder.quick_push (CONST_VECTOR_ELT (src, i * npatterns + j));
}
builder.finalize ();
-
+
if (CONST_VECTOR_DUPLICATE_P (src))
{
/* Handle the case with repeating sequence that NELTS_PER_PATTERN = 1
@@ -1204,61 +1204,52 @@ expand_const_vector (rtx target, rtx src)
if (builder.single_step_npatterns_p ())
{
/* Describe the case by choosing NPATTERNS = 4 as an example. */
- rtx base, step;
+ insn_code icode;
+
+ /* Step 1: Generate vid = { 0, 1, 2, 3, 4, 5, 6, 7, ... }. */
+ rtx vid = gen_reg_rtx (builder.mode ());
+ rtx vid_ops[] = {vid};
+ icode = code_for_pred_series (builder.mode ());
+ emit_vlmax_insn (icode, RVV_MISC_OP, vid_ops);
+
if (builder.npatterns_all_equal_p ())
{
/* Generate the variable-length vector following this rule:
{ a, a, a + step, a + step, a + step * 2, a + step * 2, ...}
E.g. { 0, 0, 8, 8, 16, 16, ... } */
- /* Step 1: Generate base = { 0, 0, 0, 0, 0, 0, 0, ... }. */
- base = expand_vector_broadcast (builder.mode (), builder.elt (0));
+ /* We want to create a pattern where value[ix] = floor (ix /
+ NPATTERNS) * NPATTERNS.  As NPATTERNS is always a power of two,
+ we can rewrite this as value[ix] = ix & -NPATTERNS.  */
+ /* Step 2: VID AND -NPATTERNS:
+ { 0&-4, 1&-4, 2&-4, 3&-4, 4&-4, 5&-4, 6&-4, 7&-4, ... }
+ */
+ rtx imm
+ = gen_int_mode (-builder.npatterns (), builder.inner_mode ());
+ rtx and_ops[] = {target, vid, imm};
+ icode = code_for_pred_scalar (AND, builder.mode ());
+ emit_vlmax_insn (icode, RVV_BINOP, and_ops);
}
else
{
/* Generate the variable-length vector following this rule:
{ a, b, a, b, a + step, b + step, a + step*2, b + step*2, ...}
- E.g. { 0, 6, 0, 6, 8, 14, 8, 14, 16, 22, 16, 22, ... } */
- /* Step 1: Generate base = { 0, 6, 0, 6, ... }. */
- rvv_builder new_builder (builder.mode (), builder.npatterns (),
- 1);
- for (unsigned int i = 0; i < builder.npatterns (); ++i)
- new_builder.quick_push (builder.elt (i));
- rtx new_vec = new_builder.build ();
- base = gen_reg_rtx (builder.mode ());
- emit_move_insn (base, new_vec);
+ E.g. { 3, 2, 1, 0, 7, 6, 5, 4, ... } */
+ /* Step 2: Generate diff = TARGET - VID:
+ { 3-0, 2-1, 1-2, 0-3, 7-4, 6-5, 5-6, 4-7, ... }.  */
+ rvv_builder v (builder.mode (), builder.npatterns (), 1);
+ for (unsigned int i = 0; i < v.npatterns (); ++i)
+ {
+ /* Calculate the diff between the target sequence and
+ vid sequence. */
+ HOST_WIDE_INT diff = INTVAL (builder.elt (i)) - i;
+ v.quick_push (gen_int_mode (diff, v.inner_mode ()));
+ }
+ /* Step 3: Generate result = VID + diff.  */
+ rtx vec = v.build ();
+ rtx add_ops[] = {target, vid, vec};
+ emit_vlmax_insn (code_for_pred (PLUS, builder.mode ()), RVV_BINOP,
+ add_ops);
}
-
- /* Step 2: Generate step = gen_int_mode (diff, mode). */
- poly_int64 value1 = rtx_to_poly_int64 (builder.elt (0));
- poly_int64 value2
- = rtx_to_poly_int64 (builder.elt (builder.npatterns ()));
- poly_int64 diff = value2 - value1;
- step = gen_int_mode (diff, builder.inner_mode ());
-
- /* Step 3: Generate vid = { 0, 1, 2, 3, 4, 5, 6, 7, ... }. */
- rtx vid = gen_reg_rtx (builder.mode ());
- rtx op[] = {vid};
- emit_vlmax_insn (code_for_pred_series (builder.mode ()), RVV_MISC_OP,
- op);
-
- /* Step 4: Generate factor = { 0, 0, 0, 0, 1, 1, 1, 1, ... }. */
- rtx factor = gen_reg_rtx (builder.mode ());
- rtx shift_ops[]
- = {factor, vid,
- gen_int_mode (exact_log2 (builder.npatterns ()), Pmode)};
- emit_vlmax_insn (code_for_pred_scalar (LSHIFTRT, builder.mode ()),
- RVV_BINOP, shift_ops);
-
- /* Step 5: Generate adjusted step = { 0, 0, 0, 0, diff, diff, ... } */
- rtx adjusted_step = gen_reg_rtx (builder.mode ());
- rtx mul_ops[] = {adjusted_step, factor, step};
- emit_vlmax_insn (code_for_pred_scalar (MULT, builder.mode ()),
- RVV_BINOP, mul_ops);
-
- /* Step 6: Generate the final result. */
- rtx add_ops[] = {target, base, adjusted_step};
- emit_vlmax_insn (code_for_pred (PLUS, builder.mode ()), RVV_BINOP,
- add_ops);
}
else
/* TODO: We will enable more variable-length vector in the future. */
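To make the payoff of the rewrite concrete, here is a scalar model of
the old and new lowerings for Case 2 (a sketch under editorial naming,
not GCC internals); each helper mirrors the per-element effect of the
emitted RVV instructions, with __builtin_ctz standing in for
exact_log2.

#include <assert.h>

#define NP 4   /* NPATTERNS; must be a power of two.  */

/* Old lowering: vid, shift, scalar multiply, then add the repeated
   base vector (three vector ops after vid).  */
static int
old_lowering (const int *base, int ix)
{
  int vid = ix;                            /* vid.v    */
  int factor = vid >> __builtin_ctz (NP);  /* vsrl.vi  */
  int adjusted = factor * NP;              /* vmul.vx  */
  return base[ix % NP] + adjusted;         /* vadd.vv  */
}

/* New lowering: vid plus a repeated difference vector (one vector op
   after vid).  */
static int
new_lowering (const int *diff, int ix)
{
  int vid = ix;                            /* vid.v    */
  return vid + diff[ix % NP];              /* vadd.vv  */
}

int
main (void)
{
  const int base[NP] = { 3, 2, 1, 0 };     /* first NPATTERNS elements  */
  const int diff[NP] = { 3, 1, -1, -3 };   /* base[i] - i               */
  for (int ix = 0; ix < 64; ix++)
    assert (old_lowering (base, ix) == new_lowering (diff, ix));
  return 0;
}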
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/partial/slp-1.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/partial/slp-1.c
index befb518..0bce836 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/partial/slp-1.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/partial/slp-1.c
@@ -20,3 +20,5 @@ f (int8_t *restrict a, int8_t *restrict b, int n)
}
/* { dg-final { scan-tree-dump-times "\.VEC_PERM" 1 "optimized" } } */
+/* { dg-final { scan-assembler {\tvid\.v} } } */
+/* { dg-final { scan-assembler {\tvand} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/partial/slp-16.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/partial/slp-16.c
new file mode 100644
index 0000000..1a35bba
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/partial/slp-16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-march=rv32gcv -mabi=ilp32d --param riscv-autovec-preference=scalable -fdump-tree-optimized-details" } */
+
+#include <stdint-gcc.h>
+
+void
+f (uint8_t *restrict a, uint8_t *restrict b, int n)
+{
+ for (int i = 0; i < n; ++i)
+ {
+ a[i * 8] = b[i * 8 + 3] + 1;
+ a[i * 8 + 1] = b[i * 8 + 2] + 2;
+ a[i * 8 + 2] = b[i * 8 + 1] + 3;
+ a[i * 8 + 3] = b[i * 8 + 0] + 4;
+ a[i * 8 + 4] = b[i * 8 + 7] + 5;
+ a[i * 8 + 5] = b[i * 8 + 6] + 6;
+ a[i * 8 + 6] = b[i * 8 + 5] + 7;
+ a[i * 8 + 7] = b[i * 8 + 4] + 8;
+ }
+}
+
+/* { dg-final { scan-tree-dump-times "\.VEC_PERM" 1 "optimized" } } */
+/* { dg-final { scan-assembler {\tvid\.v} } } */
+/* { dg-final { scan-assembler-not {\tvmul} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/partial/slp_run-16.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/partial/slp_run-16.c
new file mode 100644
index 0000000..765ec51
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/partial/slp_run-16.c
@@ -0,0 +1,66 @@
+/* { dg-do run { target { riscv_vector } } } */
+/* { dg-additional-options "--param riscv-autovec-preference=scalable" } */
+
+#include "slp-16.c"
+
+#define LIMIT 128
+void __attribute__ ((optimize (0)))
+f_golden (int8_t *restrict a, int8_t *restrict b, int n)
+{
+ for (int i = 0; i < n; ++i)
+ {
+ a[i * 8] = b[i * 8 + 3] + 1;
+ a[i * 8 + 1] = b[i * 8 + 2] + 2;
+ a[i * 8 + 2] = b[i * 8 + 1] + 3;
+ a[i * 8 + 3] = b[i * 8 + 0] + 4;
+ a[i * 8 + 4] = b[i * 8 + 7] + 5;
+ a[i * 8 + 5] = b[i * 8 + 6] + 6;
+ a[i * 8 + 6] = b[i * 8 + 5] + 7;
+ a[i * 8 + 7] = b[i * 8 + 4] + 8;
+ }
+}
+
+int
+main (void)
+{
+#define RUN(NUM) \
+ int8_t a_##NUM[NUM * 8 + 8] = {0}; \
+ int8_t a_golden_##NUM[NUM * 8 + 8] = {0}; \
+ int8_t b_##NUM[NUM * 8 + 8] = {0}; \
+ for (int i = 0; i < NUM * 8 + 8; i++) \
+ { \
+ if (i % NUM == 0) \
+ b_##NUM[i] = (i + NUM) % LIMIT; \
+ else \
+ b_##NUM[i] = (i - NUM) % (-LIMIT); \
+ } \
+ f (a_##NUM, b_##NUM, NUM); \
+ f_golden (a_golden_##NUM, b_##NUM, NUM); \
+ for (int i = 0; i < NUM * 8 + 8; i++) \
+ { \
+ if (a_##NUM[i] != a_golden_##NUM[i]) \
+ __builtin_abort (); \
+ }
+
+ RUN (3);
+ RUN (5);
+ RUN (15);
+ RUN (16);
+ RUN (17);
+ RUN (31);
+ RUN (32);
+ RUN (33);
+ RUN (63);
+ RUN (64);
+ RUN (65);
+ RUN (127);
+ RUN (128);
+ RUN (129);
+ RUN (239);
+ RUN (359);
+ RUN (498);
+ RUN (799);
+ RUN (977);
+ RUN (5789);
+ return 0;
+}
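The new tests run under the DejaGnu driver for the RVV testsuite.
Assuming a configured build tree with a RISC-V vector target, and
treating the exact test-name glob as an assumption, an invocation to
exercise just these cases would look like:

make check-gcc RUNTESTFLAGS="rvv.exp=slp-16.c slp_run-16.c"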