aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTamar Christina <tamar.christina@arm.com>2024-01-07 17:32:13 +0000
committerTamar Christina <tamar.christina@arm.com>2024-01-07 17:32:13 +0000
commitd9dd04f9f17e36854387899eb630c64a0c8d1a17 (patch)
treea3f614119a64d5431011c39165db76fb52599300
parent66d82874d2254bcb0124f77e6be220d299eab5f1 (diff)
downloadgcc-d9dd04f9f17e36854387899eb630c64a0c8d1a17.zip
gcc-d9dd04f9f17e36854387899eb630c64a0c8d1a17.tar.gz
gcc-d9dd04f9f17e36854387899eb630c64a0c8d1a17.tar.bz2
arm: Add Advanced SIMD cbranch implementation
This adds an implementation for conditional branch optab for AArch32. For e.g. void f1 () { for (int i = 0; i < N; i++) { b[i] += a[i]; if (a[i] > 0) break; } } For 128-bit vectors we generate: vcgt.s32 q8, q9, #0 vpmax.u32 d7, d16, d17 vpmax.u32 d7, d7, d7 vmov r3, s14 @ int cmp r3, #0 and for 64-bit vectors we can omit one vpmax as we still need to compress to 32-bits. gcc/ChangeLog: * config/arm/neon.md (cbranch<mode>4): New. gcc/testsuite/ChangeLog: * gcc.dg/vect/vect-early-break_2.c: Skip Arm. * gcc.dg/vect/vect-early-break_7.c: Likewise. * gcc.dg/vect/vect-early-break_75.c: Likewise. * gcc.dg/vect/vect-early-break_77.c: Likewise. * gcc.dg/vect/vect-early-break_82.c: Likewise. * gcc.dg/vect/vect-early-break_88.c: Likewise. * lib/target-supports.exp (add_options_for_vect_early_break, check_effective_target_vect_early_break_hw, check_effective_target_vect_early_break): Support AArch32. * gcc.target/arm/vect-early-break-cbranch.c: New test.
-rw-r--r--gcc/config/arm/neon.md49
-rw-r--r--gcc/testsuite/gcc.dg/vect/vect-early-break_2.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/vect-early-break_7.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/vect-early-break_75.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/vect-early-break_77.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/vect-early-break_82.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/vect-early-break_88.c2
-rw-r--r--gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c138
-rw-r--r--gcc/testsuite/lib/target-supports.exp7
9 files changed, 200 insertions, 6 deletions
diff --git a/gcc/config/arm/neon.md b/gcc/config/arm/neon.md
index 91ca7e8..bb6e28f 100644
--- a/gcc/config/arm/neon.md
+++ b/gcc/config/arm/neon.md
@@ -408,6 +408,55 @@
[(set_attr "type" "neon_store1_one_lane<q>,neon_to_gp<q>")]
)
+;; Patterns comparing two vectors and conditionally jump.
+;; Advanced SIMD lacks a vector != comparison, but this is a quite common
+;; operation. To not pay the penalty for inverting == we can map our any
+;; comparisons to all i.e. any(~x) => all(x).
+;;
+;; However unlike the AArch64 version, we can't optimize this further as the
+;; chain is too long for combine due to these being unspecs so it doesn't fold
+;; the operation to something simpler.
+(define_expand "cbranch<mode>4"
+ [(set (pc) (if_then_else
+ (match_operator 0 "expandable_comparison_operator"
+ [(match_operand:VDQI 1 "register_operand")
+ (match_operand:VDQI 2 "reg_or_zero_operand")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ "TARGET_NEON"
+{
+ rtx mask = operands[1];
+
+ /* If comparing against a non-zero vector we have to do a comparison first
+ so we can have a != 0 comparison with the result. */
+ if (operands[2] != CONST0_RTX (<MODE>mode))
+ {
+ mask = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_xor<mode>3 (mask, operands[1], operands[2]));
+ }
+
+ /* For 128-bit vectors we need an additional reduction. */
+ if (known_eq (128, GET_MODE_BITSIZE (<MODE>mode)))
+ {
+ /* Always reduce using a V4SI. */
+ rtx op1 = lowpart_subreg (V4SImode, mask, <MODE>mode);
+ mask = gen_reg_rtx (V2SImode);
+ rtx low = gen_reg_rtx (V2SImode);
+ rtx high = gen_reg_rtx (V2SImode);
+ emit_insn (gen_neon_vget_lowv4si (low, op1));
+ emit_insn (gen_neon_vget_highv4si (high, op1));
+ emit_insn (gen_neon_vpumaxv2si (mask, low, high));
+ }
+
+ rtx op1 = lowpart_subreg (V2SImode, mask, GET_MODE (mask));
+ emit_insn (gen_neon_vpumaxv2si (op1, op1, op1));
+
+ rtx val = gen_reg_rtx (SImode);
+ emit_move_insn (val, gen_lowpart (SImode, mask));
+ emit_jump_insn (gen_cbranch_cc (operands[0], val, const0_rtx, operands[3]));
+ DONE;
+})
+
;; This pattern is renamed from "vec_extract<mode><V_elem_l>" to
;; "neon_vec_extract<mode><V_elem_l>" and this pattern is called
;; by define_expand in vec-common.md file.
diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_2.c b/gcc/testsuite/gcc.dg/vect/vect-early-break_2.c
index 5c32bf9..dec0b49 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-early-break_2.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_2.c
@@ -5,7 +5,7 @@
/* { dg-additional-options "-Ofast" } */
-/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-*" } } } } */
#include <complex.h>
diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_7.c b/gcc/testsuite/gcc.dg/vect/vect-early-break_7.c
index 8c86c50..d218a06 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-early-break_7.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_7.c
@@ -5,7 +5,7 @@
/* { dg-additional-options "-Ofast" } */
-/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-*" } } } } */
#include <complex.h>
diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_75.c b/gcc/testsuite/gcc.dg/vect/vect-early-break_75.c
index ed27f86..9dcc337 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-early-break_75.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_75.c
@@ -3,7 +3,7 @@
/* { dg-require-effective-target vect_int } */
/* { dg-additional-options "-O3" } */
-/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "x86_64-*-* i?86-*-*" } } } } */
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "x86_64-*-* i?86-*-* arm*-*-*" } } } } */
#include <limits.h>
#include <assert.h>
diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_77.c b/gcc/testsuite/gcc.dg/vect/vect-early-break_77.c
index 225106a..9fa7e69 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-early-break_77.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_77.c
@@ -3,7 +3,7 @@
/* { dg-require-effective-target vect_int } */
/* { dg-additional-options "-O3" } */
-/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-*" } } } } */
#include "tree-vect.h"
diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_82.c b/gcc/testsuite/gcc.dg/vect/vect-early-break_82.c
index 0e9b2d8..7cd21d3 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-early-break_82.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_82.c
@@ -5,7 +5,7 @@
/* { dg-additional-options "-Ofast" } */
-/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-*" } } } } */
#include <complex.h>
diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_88.c b/gcc/testsuite/gcc.dg/vect/vect-early-break_88.c
index b392dd4..59ed57c 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-early-break_88.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_88.c
@@ -3,7 +3,7 @@
/* { dg-require-effective-target vect_int } */
/* { dg-additional-options "-Ofast --param vect-partial-vector-usage=2" } */
-/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-*" } } } } */
#include "tree-vect.h"
diff --git a/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c b/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c
new file mode 100644
index 0000000..f57bbd8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c
@@ -0,0 +1,138 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target vect_early_break } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-require-effective-target arm32 } */
+/* { dg-options "-O3 -march=armv8-a+simd -mfpu=auto -mfloat-abi=hard -fno-schedule-insns -fno-reorder-blocks -fno-schedule-insns2" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#define N 640
+int a[N] = {0};
+int b[N] = {0};
+
+/*
+** f1:
+** ...
+** vcgt.s32 q[0-9]+, q[0-9]+, #0
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vmov r[0-9]+, s[0-9]+ @ int
+** cmp r[0-9]+, #0
+** bne \.L[0-9]+
+** ...
+*/
+void f1 ()
+{
+ for (int i = 0; i < N; i++)
+ {
+ b[i] += a[i];
+ if (a[i] > 0)
+ break;
+ }
+}
+
+/*
+** f2:
+** ...
+** vcge.s32 q[0-9]+, q[0-9]+, #0
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vmov r[0-9]+, s[0-9]+ @ int
+** cmp r[0-9]+, #0
+** bne \.L[0-9]+
+** ...
+*/
+void f2 ()
+{
+ for (int i = 0; i < N; i++)
+ {
+ b[i] += a[i];
+ if (a[i] >= 0)
+ break;
+ }
+}
+
+/*
+** f3:
+** ...
+** vceq.i32 q[0-9]+, q[0-9]+, #0
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vmov r[0-9]+, s[0-9]+ @ int
+** cmp r[0-9]+, #0
+** bne \.L[0-9]+
+** ...
+*/
+void f3 ()
+{
+ for (int i = 0; i < N; i++)
+ {
+ b[i] += a[i];
+ if (a[i] == 0)
+ break;
+ }
+}
+
+/*
+** f4:
+** ...
+** vceq.i32 q[0-9]+, q[0-9]+, #0
+** vmvn q[0-9]+, q[0-9]+
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vmov r[0-9]+, s[0-9]+ @ int
+** cmp r[0-9]+, #0
+** bne \.L[0-9]+
+** ...
+*/
+void f4 ()
+{
+ for (int i = 0; i < N; i++)
+ {
+ b[i] += a[i];
+ if (a[i] != 0)
+ break;
+ }
+}
+
+/*
+** f5:
+** ...
+** vclt.s32 q[0-9]+, q[0-9]+, #0
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vmov r[0-9]+, s[0-9]+ @ int
+** cmp r[0-9]+, #0
+** bne \.L[0-9]+
+** ...
+*/
+void f5 ()
+{
+ for (int i = 0; i < N; i++)
+ {
+ b[i] += a[i];
+ if (a[i] < 0)
+ break;
+ }
+}
+
+/*
+** f6:
+** ...
+** vcle.s32 q[0-9]+, q[0-9]+, #0
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vmov r[0-9]+, s[0-9]+ @ int
+** cmp r[0-9]+, #0
+** bne \.L[0-9]+
+** ...
+*/
+void f6 ()
+{
+ for (int i = 0; i < N; i++)
+ {
+ b[i] += a[i];
+ if (a[i] <= 0)
+ break;
+ }
+}
+
diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
index d659531..b27c30b 100644
--- a/gcc/testsuite/lib/target-supports.exp
+++ b/gcc/testsuite/lib/target-supports.exp
@@ -4069,6 +4069,7 @@ proc check_effective_target_vect_early_break { } {
return [check_cached_effective_target_indexed vect_early_break {
expr {
[istarget aarch64*-*-*]
+ || [check_effective_target_arm_v8_neon_ok]
|| [check_effective_target_sse4]
}}]
}
@@ -4082,6 +4083,7 @@ proc check_effective_target_vect_early_break_hw { } {
return [check_cached_effective_target_indexed vect_early_break_hw {
expr {
[istarget aarch64*-*-*]
+ || [check_effective_target_arm_v8_neon_hw]
|| [check_sse4_hw_available]
}}]
}
@@ -4091,6 +4093,11 @@ proc add_options_for_vect_early_break { flags } {
return "$flags"
}
+ if { [check_effective_target_arm_v8_neon_ok] } {
+ global et_arm_v8_neon_flags
+ return "$flags $et_arm_v8_neon_flags -march=armv8-a"
+ }
+
if { [check_effective_target_sse4] } {
return "$flags -msse4.1"
}