author     Tamar Christina <tamar.christina@arm.com>    2023-12-24 19:18:53 +0000
committer  Tamar Christina <tamar.christina@arm.com>    2023-12-24 19:30:09 +0000
commit     1bcc07aeb47c0ed7eb50eac8a4e057d6336669ab (patch)
tree       30d809ede8c1bf0830044c1b42a2bb97e5322fb6
parent     01f4251b8775c832a92d55e2df57c9ac72eaceef (diff)
AArch64: Add implementation for vector cbranch for Advanced SIMD
Hi All,
This adds an implementation of the conditional branch optab for AArch64.
For example:
void f1 ()
{
  for (int i = 0; i < N; i++)
    {
      b[i] += a[i];
      if (a[i] > 0)
	break;
    }
}
For 128-bit vectors we generate:
cmgt v1.4s, v1.4s, #0
umaxp v1.4s, v1.4s, v1.4s
fmov x3, d1
cbnz x3, .L8
and for 64-bit vectors we can omit the compression:
cmgt v1.2s, v1.2s, #0
fmov x2, d1
cbz x2, .L13
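
The same tests can be sketched with ACLE intrinsics (illustration only, not part of the patch; the helper names are invented):

#include <arm_neon.h>

/* Sketch only: an intrinsics view of the branch condition the expander
   builds.  Function names are invented for illustration.  */

/* 128-bit case: compare, halve the mask with UMAXP, move the low 64 bits
   to a general register and test it (cmgt + umaxp + fmov + cbnz).  */
static inline int
any_gt_zero_q (int32x4_t v)
{
  uint32x4_t mask = vcgtq_s32 (v, vdupq_n_s32 (0));
  uint32x4_t red = vpmaxq_u32 (mask, mask);
  return vgetq_lane_u64 (vreinterpretq_u64_u32 (red), 0) != 0;
}

/* 64-bit case: the whole mask already fits in a D register, so the UMAXP
   compression step is not needed (cmgt + fmov + cbz/cbnz).  */
static inline int
any_gt_zero_d (int32x2_t v)
{
  uint32x2_t mask = vcgt_s32 (v, vdup_n_s32 (0));
  return vget_lane_u64 (vreinterpret_u64_u32 (mask), 0) != 0;
}
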
gcc/ChangeLog:

	* config/aarch64/aarch64-simd.md (cbranch<mode>4): New.

gcc/testsuite/ChangeLog:

	* gcc.target/aarch64/sve/vect-early-break-cbranch.c: New test.
	* gcc.target/aarch64/vect-early-break-cbranch.c: New test.
 gcc/config/aarch64/aarch64-simd.md                              |  42
 gcc/testsuite/gcc.target/aarch64/sve/vect-early-break-cbranch.c | 108
 gcc/testsuite/gcc.target/aarch64/vect-early-break-cbranch.c     | 124
 3 files changed, 274 insertions, 0 deletions
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 7c5fd42..f88b5bd 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -3885,6 +3885,48 @@
   DONE;
 })
 
+;; Patterns comparing two vectors and conditionally jump
+
+(define_expand "cbranch<mode>4"
+  [(set (pc)
+	(if_then_else
+	  (match_operator 0 "aarch64_equality_operator"
+	    [(match_operand:VDQ_I 1 "register_operand")
+	     (match_operand:VDQ_I 2 "aarch64_simd_reg_or_zero")])
+	  (label_ref (match_operand 3 ""))
+	  (pc)))]
+  "TARGET_SIMD"
+{
+  auto code = GET_CODE (operands[0]);
+  rtx tmp = operands[1];
+
+  /* If comparing against a non-zero vector we have to do a comparison first
+     so we can have a != 0 comparison with the result.  */
+  if (operands[2] != CONST0_RTX (<MODE>mode))
+    {
+      tmp = gen_reg_rtx (<MODE>mode);
+      emit_insn (gen_xor<mode>3 (tmp, operands[1], operands[2]));
+    }
+
+  /* For 64-bit vectors we need no reductions.  */
+  if (known_eq (128, GET_MODE_BITSIZE (<MODE>mode)))
+    {
+      /* Always reduce using a V4SI.  */
+      rtx reduc = gen_lowpart (V4SImode, tmp);
+      rtx res = gen_reg_rtx (V4SImode);
+      emit_insn (gen_aarch64_umaxpv4si (res, reduc, reduc));
+      emit_move_insn (tmp, gen_lowpart (<MODE>mode, res));
+    }
+
+  rtx val = gen_reg_rtx (DImode);
+  emit_move_insn (val, gen_lowpart (DImode, tmp));
+
+  rtx cc_reg = aarch64_gen_compare_reg (code, val, const0_rtx);
+  rtx cmp_rtx = gen_rtx_fmt_ee (code, DImode, cc_reg, const0_rtx);
+  emit_jump_insn (gen_condjump (cmp_rtx, cc_reg, operands[3]));
+  DONE;
+})
+
 ;; Patterns comparing two vectors to produce a mask.
 
 (define_expand "vec_cmp<mode><mode>"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/vect-early-break-cbranch.c b/gcc/testsuite/gcc.target/aarch64/sve/vect-early-break-cbranch.c
new file mode 100644
index 0000000..d150535
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/vect-early-break-cbranch.c
@@ -0,0 +1,108 @@
+/* { dg-do compile } */
+/* { dg-options "-O3 -fno-schedule-insns -fno-reorder-blocks -fno-schedule-insns2" } */
+/* { dg-final { check-function-bodies "**" "" "" { target lp64 } } } */
+#define N 640
+int a[N] = {0};
+int b[N] = {0};
+/*
+** f1:
+**	...
+**	cmpgt	p[0-9]+.s, p[0-9]+/z, z[0-9]+.s, #0
+**	ptest	p[0-9]+, p[0-9]+.b
+**	b.any	\.L[0-9]+
+**	...
+*/
+void f1 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] > 0)
+	break;
+    }
+}
+/*
+** f2:
+**	...
+**	cmpge	p[0-9]+.s, p[0-9]+/z, z[0-9]+.s, #0
+**	ptest	p[0-9]+, p[0-9]+.b
+**	b.any	\.L[0-9]+
+**	...
+*/
+void f2 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] >= 0)
+	break;
+    }
+}
+/*
+** f3:
+**	...
+**	cmpeq	p[0-9]+.s, p[0-9]+/z, z[0-9]+.s, #0
+**	ptest	p[0-9]+, p[0-9]+.b
+**	b.any	\.L[0-9]+
+**	...
+*/
+void f3 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] == 0)
+	break;
+    }
+}
+/*
+** f4:
+**	...
+**	cmpne	p[0-9]+.s, p[0-9]+/z, z[0-9]+.s, #0
+**	ptest	p[0-9]+, p[0-9]+.b
+**	b.any	\.L[0-9]+
+**	...
+*/
+void f4 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] != 0)
+	break;
+    }
+}
+/*
+** f5:
+**	...
+**	cmplt	p[0-9]+.s, p7/z, z[0-9]+.s, #0
+**	ptest	p[0-9]+, p[0-9]+.b
+**	b.any	.L[0-9]+
+**	...
+*/
+void f5 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] < 0)
+	break;
+    }
+}
+/*
+** f6:
+**	...
+**	cmple	p[0-9]+.s, p[0-9]+/z, z[0-9]+.s, #0
+**	ptest	p[0-9]+, p[0-9]+.b
+**	b.any	\.L[0-9]+
+**	...
+*/
+void f6 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] <= 0)
+	break;
+    }
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/vect-early-break-cbranch.c b/gcc/testsuite/gcc.target/aarch64/vect-early-break-cbranch.c
new file mode 100644
index 0000000..a5e7b94
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vect-early-break-cbranch.c
@@ -0,0 +1,124 @@
+/* { dg-do compile } */
+/* { dg-options "-O3 -fno-schedule-insns -fno-reorder-blocks -fno-schedule-insns2" } */
+/* { dg-final { check-function-bodies "**" "" "" { target lp64 } } } */
+
+#pragma GCC target "+nosve"
+
+#define N 640
+int a[N] = {0};
+int b[N] = {0};
+
+
+/*
+** f1:
+**	...
+**	cmgt	v[0-9]+.4s, v[0-9]+.4s, #0
+**	umaxp	v[0-9]+.4s, v[0-9]+.4s, v[0-9]+.4s
+**	fmov	x[0-9]+, d[0-9]+
+**	cbnz	x[0-9]+, \.L[0-9]+
+**	...
+*/
+void f1 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] > 0)
+	break;
+    }
+}
+
+/*
+** f2:
+**	...
+**	cmge	v[0-9]+.4s, v[0-9]+.4s, #0
+**	umaxp	v[0-9]+.4s, v[0-9]+.4s, v[0-9]+.4s
+**	fmov	x[0-9]+, d[0-9]+
+**	cbnz	x[0-9]+, \.L[0-9]+
+**	...
+*/
+void f2 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] >= 0)
+	break;
+    }
+}
+
+/*
+** f3:
+**	...
+**	cmeq	v[0-9]+.4s, v[0-9]+.4s, #0
+**	umaxp	v[0-9]+.4s, v[0-9]+.4s, v[0-9]+.4s
+**	fmov	x[0-9]+, d[0-9]+
+**	cbnz	x[0-9]+, \.L[0-9]+
+**	...
+*/
+void f3 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] == 0)
+	break;
+    }
+}
+
+/*
+** f4:
+**	...
+**	cmtst	v[0-9]+.4s, v[0-9]+.4s, v[0-9]+.4s
+**	umaxp	v[0-9]+.4s, v[0-9]+.4s, v[0-9]+.4s
+**	fmov	x[0-9]+, d[0-9]+
+**	cbnz	x[0-9]+, \.L[0-9]+
+**	...
+*/
+void f4 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] != 0)
+	break;
+    }
+}
+
+/*
+** f5:
+**	...
+**	cmlt	v[0-9]+.4s, v[0-9]+.4s, #0
+**	umaxp	v[0-9]+.4s, v[0-9]+.4s, v[0-9]+.4s
+**	fmov	x[0-9]+, d[0-9]+
+**	cbnz	x[0-9]+, \.L[0-9]+
+**	...
+*/
+void f5 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] < 0)
+	break;
+    }
+}
+
+/*
+** f6:
+**	...
+**	cmle	v[0-9]+.4s, v[0-9]+.4s, #0
+**	umaxp	v[0-9]+.4s, v[0-9]+.4s, v[0-9]+.4s
+**	fmov	x[0-9]+, d[0-9]+
+**	cbnz	x[0-9]+, \.L[0-9]+
+**	...
+*/
+void f6 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] <= 0)
+	break;
+    }
+}
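
For reference, a rough intrinsics sketch (not part of the patch; the helper name is invented) of the expander's non-zero-operand path, which XORs the two vectors and then branches on whether any lane of the result is set:

#include <arm_neon.h>

/* Sketch only: a == b is rewritten as "(a ^ b) has no non-zero lane",
   matching the gen_xor<mode>3 + umaxp + compare-with-zero sequence.  */
static inline int
vecs_equal_q (int32x4_t a, int32x4_t b)
{
  uint32x4_t diff = vreinterpretq_u32_s32 (veorq_s32 (a, b)); /* xor<mode>3 */
  uint32x4_t red = vpmaxq_u32 (diff, diff);                   /* umaxp      */
  return vgetq_lane_u64 (vreinterpretq_u64_u32 (red), 0) == 0; /* cbz       */
}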