author     Robin Dapp <rdapp@ventanamicro.com>   2023-07-31 17:54:35 +0200
committer  Robin Dapp <rdapp@ventanamicro.com>   2023-08-16 14:27:41 +0200
commit     694242930906d9f7ad15977cac6dcbeae1f3d3f2 (patch)
tree       f909bc4be33644751025e5d01ca43bb2c0e55353 /gcc
parent     c94e0f52f40310b6faeae11bae3366ccb1435199 (diff)
RISC-V: Implement vector "average" autovec pattern.
This patch adds vector average patterns

  op[0] = (narrow) ((wide) op[1] + (wide) op[2]) >> 1;
  op[0] = (narrow) ((wide) op[1] + (wide) op[2] + 1) >> 1;

If there is no direct support, the vectorizer can synthesize the pattern
but, presumably due to lack of narrowing operation support, won't try a
narrowing shift.  Therefore, this patch implements the expanders instead.

gcc/ChangeLog:

	* config/riscv/autovec.md (<u>avg<v_double_trunc>3_floor):
	Implement expander.
	(<u>avg<v_double_trunc>3_ceil): Ditto.
	* config/riscv/vector-iterators.md (ashiftrt): New iterator.
	(ASHIFTRT): Ditto.

gcc/testsuite/ChangeLog:

	* gcc.target/riscv/rvv/autovec/widen/vec-avg-run.c: New test.
	* gcc.target/riscv/rvv/autovec/widen/vec-avg-rv32gcv.c: New test.
	* gcc.target/riscv/rvv/autovec/widen/vec-avg-rv64gcv.c: New test.
	* gcc.target/riscv/rvv/autovec/widen/vec-avg-template.h: New test.
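For reference, the scalar idiom these expanders let the vectorizer use directly looks roughly like the following (a minimal sketch for the int8_t/int16_t pair; the vec-avg-template.h added below generalizes it to all narrow/wide type combinations):

  #include <stdint.h>

  /* Floor average: the sum is formed in the wider type, so it cannot
     overflow, and the right shift by 1 rounds toward negative infinity.  */
  void vavg_floor (int8_t *dst, int8_t *a, int8_t *b, int n)
  {
    for (int i = 0; i < n; i++)
      dst[i] = ((int16_t) a[i] + b[i]) >> 1;
  }

  /* Ceil average: adding 1 before the shift rounds toward positive
     infinity instead.  */
  void vavg_ceil (int8_t *dst, int8_t *a, int8_t *b, int n)
  {
    for (int i = 0; i < n; i++)
      dst[i] = ((int16_t) a[i] + b[i] + 1) >> 1;
  }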
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/config/riscv/autovec.md                                         | 66
-rw-r--r--  gcc/config/riscv/vector-iterators.md                                |  5
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/vec-avg-run.c      | 85
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/vec-avg-rv32gcv.c  | 10
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/vec-avg-rv64gcv.c  | 10
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/vec-avg-template.h | 33
6 files changed, 209 insertions, 0 deletions
diff --git a/gcc/config/riscv/autovec.md b/gcc/config/riscv/autovec.md
index 21cf2ff..acca4c2 100644
--- a/gcc/config/riscv/autovec.md
+++ b/gcc/config/riscv/autovec.md
@@ -2044,3 +2044,69 @@
riscv_vector::reduction_type::MASK_LEN_FOLD_LEFT);
DONE;
})
+
+;; -------------------------------------------------------------------------
+;; ---- [INT] Average.
+;; -------------------------------------------------------------------------
+;; Implements the following "average" patterns:
+;; floor:
+;; op[0] = (narrow) ((wide) op[1] + (wide) op[2]) >> 1;
+;; ceil:
+;; op[0] = (narrow) ((wide) op[1] + (wide) op[2] + 1) >> 1;
+;; -------------------------------------------------------------------------
+
+(define_expand "<u>avg<v_double_trunc>3_floor"
+ [(set (match_operand:<V_DOUBLE_TRUNC> 0 "register_operand")
+ (truncate:<V_DOUBLE_TRUNC>
+ (<ext_to_rshift>:VWEXTI
+ (plus:VWEXTI
+ (any_extend:VWEXTI
+ (match_operand:<V_DOUBLE_TRUNC> 1 "register_operand"))
+ (any_extend:VWEXTI
+ (match_operand:<V_DOUBLE_TRUNC> 2 "register_operand"))))))]
+ "TARGET_VECTOR"
+{
+ /* First emit a widening addition. */
+ rtx tmp1 = gen_reg_rtx (<MODE>mode);
+ rtx ops1[] = {tmp1, operands[1], operands[2]};
+ insn_code icode = code_for_pred_dual_widen (PLUS, <CODE>, <MODE>mode);
+ riscv_vector::emit_vlmax_insn (icode, riscv_vector::RVV_BINOP, ops1);
+
+ /* Then a narrowing shift. */
+ rtx ops2[] = {operands[0], tmp1, const1_rtx};
+ icode = code_for_pred_narrow_scalar (<EXT_TO_RSHIFT>, <MODE>mode);
+ riscv_vector::emit_vlmax_insn (icode, riscv_vector::RVV_BINOP, ops2);
+ DONE;
+})
+
+(define_expand "<u>avg<v_double_trunc>3_ceil"
+ [(set (match_operand:<V_DOUBLE_TRUNC> 0 "register_operand")
+ (truncate:<V_DOUBLE_TRUNC>
+ (<ext_to_rshift>:VWEXTI
+ (plus:VWEXTI
+ (plus:VWEXTI
+ (any_extend:VWEXTI
+ (match_operand:<V_DOUBLE_TRUNC> 1 "register_operand"))
+ (any_extend:VWEXTI
+ (match_operand:<V_DOUBLE_TRUNC> 2 "register_operand")))
+ (const_int 1)))))]
+ "TARGET_VECTOR"
+{
+ /* First emit a widening addition. */
+ rtx tmp1 = gen_reg_rtx (<MODE>mode);
+ rtx ops1[] = {tmp1, operands[1], operands[2]};
+ insn_code icode = code_for_pred_dual_widen (PLUS, <CODE>, <MODE>mode);
+ riscv_vector::emit_vlmax_insn (icode, riscv_vector::RVV_BINOP, ops1);
+
+ /* Then add 1. */
+ rtx tmp2 = gen_reg_rtx (<MODE>mode);
+ rtx ops2[] = {tmp2, tmp1, const1_rtx};
+ icode = code_for_pred_scalar (PLUS, <MODE>mode);
+ riscv_vector::emit_vlmax_insn (icode, riscv_vector::RVV_BINOP, ops2);
+
+ /* Finally, a narrowing shift. */
+ rtx ops3[] = {operands[0], tmp2, const1_rtx};
+ icode = code_for_pred_narrow_scalar (<EXT_TO_RSHIFT>, <MODE>mode);
+ riscv_vector::emit_vlmax_insn (icode, riscv_vector::RVV_BINOP, ops3);
+ DONE;
+})
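Per element, the two expanders thus emit a widening add, an optional add of 1 in the wide mode, and a narrowing right shift. The C model below only illustrates these intermediate steps (the helper names and the single-element form are hypothetical; the real expanders emit full-length vwadd[u].vv, vadd.vi and vnsra/vnsrl.wi vector instructions):

  #include <stdint.h>

  /* Model of <u>avg<mode>3_floor for the int8_t/int16_t pair.  */
  static inline int8_t avg_floor_elem (int8_t a, int8_t b)
  {
    int16_t wide = (int16_t) a + (int16_t) b;  /* widening add (vwadd.vv)    */
    return (int8_t) (wide >> 1);               /* narrowing shift (vnsra.wi) */
  }

  /* Model of <u>avg<mode>3_ceil: same, with an extra add of 1 in the
     wide type before the narrowing shift.  */
  static inline int8_t avg_ceil_elem (int8_t a, int8_t b)
  {
    int16_t wide = (int16_t) a + (int16_t) b;  /* widening add (vwadd.vv)    */
    wide = wide + 1;                           /* add immediate (vadd.vi)    */
    return (int8_t) (wide >> 1);               /* narrowing shift (vnsra.wi) */
  }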
diff --git a/gcc/config/riscv/vector-iterators.md b/gcc/config/riscv/vector-iterators.md
index fc237ac..4023a03 100644
--- a/gcc/config/riscv/vector-iterators.md
+++ b/gcc/config/riscv/vector-iterators.md
@@ -2040,6 +2040,11 @@
(define_code_attr nmsub_nmadd [(plus "nmsub") (minus "nmadd")])
(define_code_attr nmsac_nmacc [(plus "nmsac") (minus "nmacc")])
+(define_code_attr ext_to_rshift [(sign_extend "ashiftrt")
+ (zero_extend "lshiftrt")])
+(define_code_attr EXT_TO_RSHIFT [(sign_extend "ASHIFTRT")
+ (zero_extend "LSHIFTRT")])
+
(define_code_iterator and_ior [and ior])
(define_code_iterator any_float_binop [plus mult minus div])
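The new ext_to_rshift/EXT_TO_RSHIFT code attributes select the narrowing shift that matches the extension used in the widening add: sign-extended operands are narrowed with an arithmetic right shift (vnsra), zero-extended ones with a logical right shift (vnsrl). A small, self-contained illustration of the two cases (values chosen for demonstration only):

  #include <stdint.h>
  #include <assert.h>

  int main (void)
  {
    /* Signed: the wide sum -11 is shifted arithmetically, so the floor
       average of -5 and -6 comes out as -6.  */
    int16_t wide = (int16_t) -5 + (int16_t) -6;
    assert ((int8_t) (wide >> 1) == -6);

    /* Unsigned: the sum 401 does not fit in uint8_t; it is formed in
       uint16_t and narrowed with a logical shift, giving 200.  */
    uint16_t uwide = (uint16_t) 200 + (uint16_t) 201;
    assert ((uint8_t) (uwide >> 1) == 200);

    return 0;
  }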
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/vec-avg-run.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/vec-avg-run.c
new file mode 100644
index 0000000..7ca193e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/vec-avg-run.c
@@ -0,0 +1,85 @@
+/* { dg-do run { target { riscv_vector } } } */
+/* { dg-additional-options "-std=c99 -fno-vect-cost-model --param=riscv-autovec-preference=scalable -lm" } */
+
+#include <limits.h>
+#include <math.h>
+#include <assert.h>
+
+#include "vec-avg-template.h"
+
+#define SZ 256
+
+#define RUNS1(TYPE, SCALE) \
+ TYPE a##TYPE[SZ + 1]; \
+ TYPE b##TYPE[SZ + 1]; \
+ TYPE dst##TYPE[SZ + 1]; \
+ for (int cnt = 0, i = -(SZ * SCALE) / 2; i < (SZ * SCALE) / 2; i += SCALE) \
+ { \
+ a##TYPE[cnt] = i; \
+ b##TYPE[cnt] = i + 1; \
+ dst##TYPE[cnt++] = 0; \
+ } \
+ vavg_##TYPE (dst##TYPE, a##TYPE, b##TYPE, SZ); \
+ for (int i = 0; i < SZ; i += SCALE) \
+ assert (dst##TYPE[i] == floor ((a##TYPE[i] + b##TYPE[i]) / 2.0));
+
+#define RUNU1(TYPE, SCALE) \
+ TYPE a##TYPE[SZ + 1]; \
+ TYPE b##TYPE[SZ + 1]; \
+ TYPE dst##TYPE[SZ + 1]; \
+ for (int cnt = 0, i = 0; i < (SZ * SCALE); i += SCALE) \
+ { \
+ a##TYPE[cnt] = i; \
+ b##TYPE[cnt] = i + 1; \
+ dst##TYPE[cnt++] = 0; \
+ } \
+ vavg_##TYPE (dst##TYPE, a##TYPE, b##TYPE, SZ); \
+ for (int i = 0; i < SZ; i += SCALE) \
+ assert (dst##TYPE[i] == floor ((a##TYPE[i] + b##TYPE[i]) / 2.0));
+
+#define RUNS2(TYPE, SCALE) \
+ TYPE a2##TYPE[SZ + 1]; \
+ TYPE b2##TYPE[SZ + 1]; \
+ TYPE dst2##TYPE[SZ + 1]; \
+ for (int cnt = 0, i = -(SZ * SCALE) / 2; i < (SZ * SCALE) / 2; i += SCALE) \
+ { \
+ a2##TYPE[cnt] = i; \
+ b2##TYPE[cnt] = i + 1; \
+ dst2##TYPE[cnt++] = 0; \
+ } \
+ vavg2_##TYPE (dst2##TYPE, a2##TYPE, b2##TYPE, SZ); \
+ for (int i = 0; i < SZ; i += SCALE) \
+ assert (dst2##TYPE[i] == ceil ((a2##TYPE[i] + b2##TYPE[i]) / 2.0));
+
+#define RUNU2(TYPE, SCALE) \
+ TYPE a2##TYPE[SZ + 1]; \
+ TYPE b2##TYPE[SZ + 1]; \
+ TYPE dst2##TYPE[SZ + 1]; \
+ for (int cnt = 0, i = 0; i < (SZ * SCALE); i += SCALE) \
+ { \
+ a2##TYPE[cnt] = i; \
+ b2##TYPE[cnt] = i + 1; \
+ dst2##TYPE[cnt++] = 0; \
+ } \
+ vavg2_##TYPE (dst2##TYPE, a2##TYPE, b2##TYPE, SZ); \
+ for (int i = 0; i < SZ; i += SCALE) \
+ assert (dst2##TYPE[i] == ceil ((a2##TYPE[i] + b2##TYPE[i]) / 2.0));
+
+#define RUN_ALL() \
+ RUNS1 (int8_t, 1) \
+ RUNS1 (int16_t, 256) \
+ RUNS1 (int32_t, 65536) \
+ RUNU1 (uint8_t, 1) \
+ RUNU1 (uint16_t, 256) \
+ RUNU1 (uint32_t, 65536) \
+ RUNS2 (int8_t, 1) \
+ RUNS2 (int16_t, 256) \
+ RUNS2 (int32_t, 65536) \
+ RUNU2 (uint8_t, 1) \
+ RUNU2 (uint16_t, 256) \
+ RUNU2 (uint32_t, 65536)\
+
+int main ()
+{
+ RUN_ALL ()
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/vec-avg-rv32gcv.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/vec-avg-rv32gcv.c
new file mode 100644
index 0000000..e275433
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/vec-avg-rv32gcv.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-std=c99 -fno-vect-cost-model -march=rv32gcv -mabi=ilp32d --param=riscv-autovec-preference=scalable" } */
+
+#include "vec-avg-template.h"
+
+/* { dg-final { scan-assembler-times {\tvwadd\.vv} 6 } } */
+/* { dg-final { scan-assembler-times {\tvwaddu\.vv} 6 } } */
+/* { dg-final { scan-assembler-times {\tvadd\.vi} 6 } } */
+/* { dg-final { scan-assembler-times {\tvnsrl\.wi} 6 } } */
+/* { dg-final { scan-assembler-times {\tvnsra\.wi} 6 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/vec-avg-rv64gcv.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/vec-avg-rv64gcv.c
new file mode 100644
index 0000000..1f0ef29
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/vec-avg-rv64gcv.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-std=c99 -fno-vect-cost-model -march=rv64gcv -mabi=lp64d --param=riscv-autovec-preference=scalable" } */
+
+#include "vec-avg-template.h"
+
+/* { dg-final { scan-assembler-times {\tvwadd\.vv} 6 } } */
+/* { dg-final { scan-assembler-times {\tvwaddu\.vv} 6 } } */
+/* { dg-final { scan-assembler-times {\tvadd\.vi} 6 } } */
+/* { dg-final { scan-assembler-times {\tvnsrl\.wi} 6 } } */
+/* { dg-final { scan-assembler-times {\tvnsra\.wi} 6 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/vec-avg-template.h b/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/vec-avg-template.h
new file mode 100644
index 0000000..9c2a6f1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/vec-avg-template.h
@@ -0,0 +1,33 @@
+#include <stdint-gcc.h>
+
+#define TEST_TYPE(TYPE, TYPE2) \
+ __attribute__ ((noipa)) void vavg_##TYPE (TYPE *dst, TYPE *a, TYPE *b, \
+ int n) \
+ { \
+ for (int i = 0; i < n; i++) \
+ dst[i] = ((TYPE2) a[i] + b[i]) >> 1; \
+ }
+
+#define TEST_TYPE2(TYPE, TYPE2) \
+ __attribute__ ((noipa)) void vavg2_##TYPE (TYPE *dst, TYPE *a, TYPE *b, \
+ int n) \
+ { \
+ for (int i = 0; i < n; i++) \
+ dst[i] = ((TYPE2) a[i] + b[i] + 1) >> 1; \
+ }
+
+#define TEST_ALL() \
+ TEST_TYPE (int8_t, int16_t) \
+ TEST_TYPE (uint8_t, uint16_t) \
+ TEST_TYPE (int16_t, int32_t) \
+ TEST_TYPE (uint16_t, uint32_t) \
+ TEST_TYPE (int32_t, int64_t) \
+ TEST_TYPE (uint32_t, uint64_t) \
+ TEST_TYPE2 (int8_t, int16_t) \
+ TEST_TYPE2 (uint8_t, uint16_t) \
+ TEST_TYPE2 (int16_t, int32_t) \
+ TEST_TYPE2 (uint16_t, uint32_t) \
+ TEST_TYPE2 (int32_t, int64_t) \
+ TEST_TYPE2 (uint32_t, uint64_t)
+
+TEST_ALL()