author     Will Schmidt <willschm@gcc.gnu.org>  2019-09-26 19:19:47 +0000
committer  Will Schmidt <willschm@gcc.gnu.org>  2019-09-26 19:19:47 +0000
commit     9ab2f9aed07c3c02ee633801d30b86a216b4cc3b (patch)
tree       98fe241de0537f84f900d5c71d7dd79ac1b3ea6e
parent     be193fa7c9842f30f3aa804696ad83a4117d826e (diff)
rs6000-builtin.def: (LVSL...
[gcc]

2019-09-26  Will Schmidt  <will_schmidt@vnet.ibm.com>

	* config/rs6000/rs6000-builtin.def: (LVSL, LVSR, LVEBX, LVEHX,
	LVEWX, LVXL, LVXL_V2DF, LVXL_V2DI, LVXL_V4SF, LVXL_V4SI,
	LVXL_V8HI, LVXL_V16QI, LVX, LVX_V1TI, LVX_V2DF, LVX_V2DI,
	LVX_V4SF, LVX_V4SI, LVX_V8HI, LVX_V16QI, LVLX, LVLXL, LVRX,
	LVRXL, LXSDX, LXVD2X_V1TI, LXVD2X_V2DF, LXVD2X_V2DI, LXVDSX,
	LXVW4X_V4SF, LXVW4X_V4SI, LXVW4X_V8HI, LXVW4X_V16QI,
	LD_ELEMREV_V1TI, LD_ELEMREV_V2DF, LD_ELEMREV_V2DI,
	LD_ELEMREV_V4SF, LD_ELEMREV_V4SI, LD_ELEMREV_V8HI,
	LD_ELEMREV_V16QI): Use the PURE attribute.

[testsuite]

2019-09-26  Will Schmidt  <will_schmidt@vnet.ibm.com>

	* gcc.target/powerpc/pure-builtin-redundant-load.c: New.

From-SVN: r276163
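For context: the PURE attribute on these built-ins plays the same role as
GCC's generic "pure" function attribute, which tells the optimizers that a
function may read memory but never writes it, so loads around a call need
not be reloaded. A minimal standalone sketch of that generic behavior
follows; the names "observe" and "demo" are illustrative only and are not
part of this patch.

/* With "pure", GCC knows the call cannot clobber memory, so FRE can
   reuse the first load of global_word for the one after the call.  */
extern int global_word;
__attribute__ ((pure)) extern int observe (const int *p);

int demo (void)
{
  int a = global_word;             /* first load */
  int b = observe (&global_word);  /* pure call: no memory writes */
  int c = global_word;             /* redundant; FRE reuses 'a' */
  return a + b + c;
}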
-rw-r--r--  gcc/testsuite/gcc.target/powerpc/pure-builtin-redundant-load.c  47
1 file changed, 47 insertions, 0 deletions
diff --git a/gcc/testsuite/gcc.target/powerpc/pure-builtin-redundant-load.c b/gcc/testsuite/gcc.target/powerpc/pure-builtin-redundant-load.c
new file mode 100644
index 0000000..16ab6ab
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/pure-builtin-redundant-load.c
@@ -0,0 +1,47 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target powerpc_vsx_ok } */
+/* { dg-options "-O2 -fdump-tree-fre-all -mvsx" } */
+
+/* Verify that a redundant load occurring both before and after a call
+   to a vector load built-in is removed.
+   This test accompanies the update that marks a number of the vector
+   load built-ins with the PURE attribute instead of MEM, indicating
+   that those built-ins only read from memory rather than both reading
+   from and writing to it.
+   As a result, the redundant loads can be identified in an earlier
+   pass and optimized away.  */
+
+#include <altivec.h>
+
+vector signed short load_data;
+
+vector signed short foo()
+{
+ vector signed short r11,r12,r13;
+ r11 = load_data;
+ r12 = vec_xl (0, &load_data[0]);
+ r13 = load_data;
+ return (r11 + r12 + r13);
+}
+
+vector signed short biz()
+{
+ vector signed short r21,r22,r23;
+ r21 = load_data;
+ r22 = vec_lvehx (0, &load_data[0]);
+ r23 = load_data;
+ return (r21 + r22 + r23);
+}
+
+vector signed short bar()
+{
+ vector signed short r31,r32,r33;
+ r31 = load_data;
+ r32 = vec_lvx (0, &load_data[0]);
+ r33 = load_data;
+ return (r31 + r32 + r33);
+}
+
+/* { dg-final { scan-tree-dump-times "Removing dead stmt r13_. = load_data;" 1 "fre1" } } */
+/* { dg-final { scan-tree-dump-times "Removing dead stmt r23_. = load_data;" 1 "fre1" } } */
+/* { dg-final { scan-tree-dump-times "Removing dead stmt r33_. = load_data;" 1 "fre1" } } */
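
For reference, the dump these scans match can be reproduced by hand on a
VSX-capable PowerPC target using the same options as the dg-options line
(the numeric pass prefix in the dump file name varies between GCC versions):

  gcc -O2 -mvsx -fdump-tree-fre-all -S pure-builtin-redundant-load.c
  grep "Removing dead stmt" pure-builtin-redundant-load.c.*.fre1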