diff options
author | Richard Sandiford <richard.sandiford@arm.com> | 2024-05-29 16:43:33 +0100 |
---|---|---|
committer | Richard Sandiford <richard.sandiford@arm.com> | 2024-05-29 16:43:33 +0100 |
commit | 39263ed2d39ac1cebde59bc5e72ddcad5dc7a1ec (patch) | |
tree | 74a82865b5ad39bc6923a031027544ac63fcf008 | |
parent | d22eaeca7634b57e80ea61cadd82902fdc7e57ea (diff) | |
download | gcc-39263ed2d39ac1cebde59bc5e72ddcad5dc7a1ec.zip gcc-39263ed2d39ac1cebde59bc5e72ddcad5dc7a1ec.tar.gz gcc-39263ed2d39ac1cebde59bc5e72ddcad5dc7a1ec.tar.bz2 |
aarch64: Split aarch64_combinev16qi before RA [PR115258]
Two-vector TBL instructions are fed by an aarch64_combinev16qi, whose
purpose is to put the two input data vectors into consecutive registers.
This aarch64_combinev16qi was then split after reload into individual
moves (from the first input to the first half of the output, and from
the second input to the second half of the output).
In the worst case, the RA might allocate things so that the destination
of the aarch64_combinev16qi is the second input followed by the first
input. In that case, the split form of aarch64_combinev16qi uses three
eors to swap the registers around.
This PR is about a test where this worst case occurred. And given the
insn description, that allocation doesn't seem unreasonable.
early-ra should (hopefully) mean that we're now better at allocating
subregs of vector registers. The upcoming RA subreg patches should
improve things further. The best fix for the PR therefore seems
to be to split the combination before RA, so that the RA can see
the underlying moves.
Perhaps it even makes sense to do this at expand time, avoiding the need
for aarch64_combinev16qi entirely. That deserves more experimentation
though.
gcc/
PR target/115258
* config/aarch64/aarch64-simd.md (aarch64_combinev16qi): Allow
the split before reload.
* config/aarch64/aarch64.cc (aarch64_split_combinev16qi): Generalize
into a form that handles pseudo registers.
gcc/testsuite/
PR target/115258
* gcc.target/aarch64/pr115258.c: New test.
-rw-r--r-- | gcc/config/aarch64/aarch64-simd.md | 2 | ||||
-rw-r--r-- | gcc/config/aarch64/aarch64.cc | 29 | ||||
-rw-r--r-- | gcc/testsuite/gcc.target/aarch64/pr115258.c | 19 |
3 files changed, 34 insertions, 16 deletions
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md index c311888..868f448 100644 --- a/gcc/config/aarch64/aarch64-simd.md +++ b/gcc/config/aarch64/aarch64-simd.md @@ -8474,7 +8474,7 @@ UNSPEC_CONCAT))] "TARGET_SIMD" "#" - "&& reload_completed" + "&& 1" [(const_int 0)] { aarch64_split_combinev16qi (operands); diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc index ee12d88..13191ec 100644 --- a/gcc/config/aarch64/aarch64.cc +++ b/gcc/config/aarch64/aarch64.cc @@ -25333,27 +25333,26 @@ aarch64_output_sve_ptrues (rtx const_unspec) void aarch64_split_combinev16qi (rtx operands[3]) { - unsigned int dest = REGNO (operands[0]); - unsigned int src1 = REGNO (operands[1]); - unsigned int src2 = REGNO (operands[2]); machine_mode halfmode = GET_MODE (operands[1]); - unsigned int halfregs = REG_NREGS (operands[1]); - rtx destlo, desthi; gcc_assert (halfmode == V16QImode); - if (src1 == dest && src2 == dest + halfregs) + rtx destlo = simplify_gen_subreg (halfmode, operands[0], + GET_MODE (operands[0]), 0); + rtx desthi = simplify_gen_subreg (halfmode, operands[0], + GET_MODE (operands[0]), + GET_MODE_SIZE (halfmode)); + + bool skiplo = rtx_equal_p (destlo, operands[1]); + bool skiphi = rtx_equal_p (desthi, operands[2]); + + if (skiplo && skiphi) { /* No-op move. Can't split to nothing; emit something. */ emit_note (NOTE_INSN_DELETED); return; } - /* Preserve register attributes for variable tracking. */ - destlo = gen_rtx_REG_offset (operands[0], halfmode, dest, 0); - desthi = gen_rtx_REG_offset (operands[0], halfmode, dest + halfregs, - GET_MODE_SIZE (halfmode)); - /* Special case of reversed high/low parts. */ if (reg_overlap_mentioned_p (operands[2], destlo) && reg_overlap_mentioned_p (operands[1], desthi)) @@ -25366,16 +25365,16 @@ aarch64_split_combinev16qi (rtx operands[3]) { /* Try to avoid unnecessary moves if part of the result is in the right place already. 
*/ - if (src1 != dest) + if (!skiplo) emit_move_insn (destlo, operands[1]); - if (src2 != dest + halfregs) + if (!skiphi) emit_move_insn (desthi, operands[2]); } else { - if (src2 != dest + halfregs) + if (!skiphi) emit_move_insn (desthi, operands[2]); - if (src1 != dest) + if (!skiplo) emit_move_insn (destlo, operands[1]); } } diff --git a/gcc/testsuite/gcc.target/aarch64/pr115258.c b/gcc/testsuite/gcc.target/aarch64/pr115258.c new file mode 100644 index 0000000..9a489d4 --- /dev/null +++ b/gcc/testsuite/gcc.target/aarch64/pr115258.c @@ -0,0 +1,19 @@ +/* { dg-options "-O2" } */ +/* { dg-final { check-function-bodies "**" "" "" } } */ + +/* +** fun: +** (ldr|adrp) [^\n]+ +** (ldr|adrp) [^\n]+ +** (ldr|adrp) [^\n]+ +** (ldr|adrp) [^\n]+ +** tbl v[0-9]+.16b, {v[0-9]+.16b - v[0-9]+.16b}, v[0-9]+.16b +** str [^\n]+ +** ret +*/ +typedef int veci __attribute__ ((vector_size (4 * sizeof (int)))); +void fun (veci *a, veci *b, veci *c) { + *c = __builtin_shufflevector (*a, *b, 0, 5, 2, 7); +} + +/* { dg-final { scan-assembler-not {\teor\t} } } */ |