author     Aaron Sawdey <acsawdey@linux.ibm.com>   2019-01-03 19:20:57 +0000
committer  Aaron Sawdey <acsawdey@gcc.gnu.org>     2019-01-03 13:20:57 -0600
commit     19db0ebb16b4df01c536e108257bab81e0b1dabd (patch)
tree       2af5e2fb49656d8a262aeb76ac1a58536e6dafd2 /gcc/config
parent     5c571497e1b9b4ac407aa0db359292b0de6cd42e (diff)
rs6000-string.c (expand_block_move): Don't use unaligned vsx and avoid lxvd2x/stxvd2x.
2019-01-03  Aaron Sawdey  <acsawdey@linux.ibm.com>

	* config/rs6000/rs6000-string.c (expand_block_move): Don't use
	unaligned vsx and avoid lxvd2x/stxvd2x.
	(gen_lvx_v4si_move): New function.

From-SVN: r267562
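For context, expand_block_move picks a chunk size, a machine mode, and a move-generator callback, then walks the copy emitting a load into a temporary register followed by a store for each chunk. The sketch below is a simplified paraphrase of that loop (not the verbatim rs6000-string.c code; the surrounding declarations come from expand_block_move itself) and shows why gen_lvx_v4si_move can slot in where gen_movv4si was used: both fit the rtx (*) (rtx, rtx) shape of gen_func.mov. The usual motivation for preferring lvx/stvx here, though not spelled out in the commit message, is that on little-endian targets lxvd2x/stxvd2x access the two doublewords in swapped element order and typically need an extra permute, while lvx/stvx avoid that but require 16-byte alignment, hence the tightened align >= 128 test in the second hunk.

  /* Simplified paraphrase of the expand_block_move copy loop; the real
     code also batches the stores and handles several smaller chunk
     sizes that are not shown in this diff.  */
  for (offset = 0; bytes > 0; offset += move_bytes, bytes -= move_bytes)
    {
      /* mode, move_bytes and gen_func.mov were chosen by the if/else
	 chain patched in the second hunk below.  */
      rtx src = adjust_address (orig_src, mode, offset);
      rtx dest = adjust_address (orig_dest, mode, offset);
      rtx tmp_reg = gen_reg_rtx (mode);

      /* With this patch, the 16-byte V4SImode case reaches here with
	 gen_func.mov == gen_lvx_v4si_move, so the load and the store
	 are pinned to lvx/stvx by the unspec patterns instead of
	 whatever movv4si would have expanded to.  */
      emit_insn ((*gen_func.mov) (tmp_reg, src));
      emit_insn ((*gen_func.mov) (dest, tmp_reg));
    }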
Diffstat (limited to 'gcc/config')
-rw-r--r--  gcc/config/rs6000/rs6000-string.c  23
1 file changed, 21 insertions(+), 2 deletions(-)
diff --git a/gcc/config/rs6000/rs6000-string.c b/gcc/config/rs6000/rs6000-string.c
index f370397..37e19ed 100644
--- a/gcc/config/rs6000/rs6000-string.c
+++ b/gcc/config/rs6000/rs6000-string.c
@@ -2669,6 +2669,25 @@ expand_strn_compare (rtx operands[], int no_length)
return true;
}
+/* Generate loads and stores for a move of v4si mode using lvx/stvx.
+ This uses altivec_{l,st}vx_<mode>_internal which use unspecs to
+ keep combine from changing what instruction gets used.
+
+ DEST is the destination for the data.
+ SRC is the source of the data for the move. */
+
+static rtx
+gen_lvx_v4si_move (rtx dest, rtx src)
+{
+ gcc_assert (MEM_P (dest) ^ MEM_P (src));
+ gcc_assert (GET_MODE (dest) == V4SImode && GET_MODE (src) == V4SImode);
+
+ if (MEM_P (dest))
+ return gen_altivec_stvx_v4si_internal (dest, src);
+ else
+ return gen_altivec_lvx_v4si_internal (dest, src);
+}
+
/* Expand a block move operation, and return 1 if successful. Return 0
if we should let the compiler generate normal code.
@@ -2721,11 +2740,11 @@ expand_block_move (rtx operands[])
/* Altivec first, since it will be faster than a string move
when it applies, and usually not significantly larger. */
- if (TARGET_ALTIVEC && bytes >= 16 && (TARGET_EFFICIENT_UNALIGNED_VSX || align >= 128))
+ if (TARGET_ALTIVEC && bytes >= 16 && align >= 128)
{
move_bytes = 16;
mode = V4SImode;
- gen_func.mov = gen_movv4si;
+ gen_func.mov = gen_lvx_v4si_move;
}
else if (bytes >= 8 && TARGET_POWERPC64
&& (align >= 64 || !STRICT_ALIGNMENT))
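As a concrete illustration of the condition change in this hunk, the following standalone sketch (plain C, not GCC code; the flag variables are hypothetical stand-ins for the target macros) models only the two branches visible above. The real expand_block_move continues with smaller fallback chunk sizes beyond the part of the diff shown here; align is in bits, as in the GCC code.

  #include <stdio.h>

  /* Hypothetical stand-ins for the target flags used in the hunk above.  */
  static int target_altivec = 1;
  static int target_powerpc64 = 1;
  static int strict_alignment = 0;

  /* Return the chunk size (in bytes) the patched code would pick next,
     mirroring only the two branches visible in this hunk.  */
  static int
  pick_move_bytes (int bytes, int align_bits)
  {
    if (target_altivec && bytes >= 16 && align_bits >= 128)
      return 16;	/* V4SImode via gen_lvx_v4si_move (lvx/stvx).  */
    else if (bytes >= 8 && target_powerpc64
	     && (align_bits >= 64 || !strict_alignment))
      return 8;		/* 8-byte chunk; further fallbacks not shown.  */
    return 0;
  }

  int
  main (void)
  {
    /* Before this patch, a copy with only 64-bit alignment could still
       take the 16-byte vector path when TARGET_EFFICIENT_UNALIGNED_VSX
       was set; after the patch it falls through to the 8-byte path.  */
    printf ("32 bytes, 128-bit aligned -> %d-byte chunks\n", pick_move_bytes (32, 128));
    printf ("32 bytes,  64-bit aligned -> %d-byte chunks\n", pick_move_bytes (32, 64));
    return 0;
  }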