author     Evan Cheng <evan.cheng@apple.com>          2005-11-30 06:29:07 +0000
committer  Eric Christopher <echristo@gcc.gnu.org>    2005-11-30 06:29:07 +0000
commit     be7724ed748407d7edb5b2b3958c37a9745ec40a (patch)
tree       9b24fbba5892056659fbb014226d85acd35d4a09
parent     7e04157d149444c6a44aa5a02356d2e6c370be73 (diff)
xmmintrin.h (_MM_TRANSPOSE4_PS): Rewrite using high/low moves and unpack to speed up.

2005-11-29  Evan Cheng  <evan.cheng@apple.com>

	* config/i386/xmmintrin.h (_MM_TRANSPOSE4_PS): Rewrite using
	high/low moves and unpack to speed up.

From-SVN: r107700
-rw-r--r--   gcc/ChangeLog                  7
-rw-r--r--   gcc/config/i386/xmmintrin.h   16
2 files changed, 14 insertions, 9 deletions
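For reference, a minimal sketch of the data movement performed by the rewritten macro, expressed with the user-level SSE intrinsics that wrap the builtins used in the patch. The helper name and the lane comments are illustrative only and are not part of the commit; rIJ denotes the element in row I, column J, with lane 0 listed first.

#include <xmmintrin.h>

/* Illustrative sketch (not part of the patch): the same transpose as the
   rewritten _MM_TRANSPOSE4_PS, written with the intrinsics that map to
   unpcklps/unpckhps/movlhps/movhlps.  */
static inline void
transpose4_sketch (__m128 *row0, __m128 *row1, __m128 *row2, __m128 *row3)
{
  __m128 t0 = _mm_unpacklo_ps (*row0, *row1);   /* r00 r10 r01 r11 */
  __m128 t1 = _mm_unpacklo_ps (*row2, *row3);   /* r20 r30 r21 r31 */
  __m128 t2 = _mm_unpackhi_ps (*row0, *row1);   /* r02 r12 r03 r13 */
  __m128 t3 = _mm_unpackhi_ps (*row2, *row3);   /* r22 r32 r23 r33 */

  *row0 = _mm_movelh_ps (t0, t1);               /* r00 r10 r20 r30 */
  *row1 = _mm_movehl_ps (t1, t0);               /* r01 r11 r21 r31 */
  *row2 = _mm_movelh_ps (t2, t3);               /* r02 r12 r22 r32 */
  *row3 = _mm_movehl_ps (t3, t2);               /* r03 r13 r23 r33 */
}

Each unpack interleaves a pair of rows, and the movlhps/movhlps steps then glue the matching 64-bit halves together, so every output register ends up holding one column of the original matrix.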
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 89674d9..9c49626 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,8 @@
+2005-11-29 Evan Cheng <evan.cheng@apple.com>
+
+ * config/i386/xmmintrin.h (_MM_TRANSPOSE4_PS): Rewrite using high/low
+ moves and unpack to speed up.
+
2005-11-29 David S. Miller <davem@sunset.davemloft.net>
* config/sparc/sparc.c (gen_compare_reg): Kill 2nd and 3rd
@@ -107,7 +112,7 @@
Uros Bizjak <uros@kss-loka.si>
PR middle-end/20219
- * fold-const.c (fold binary) <RDIV_EXPR>: Optimize
+ * fold-const.c (fold binary) <RDIV_EXPR>: Optimize
sin(x)/tan(x) as cos(x) and tan(x)/sin(x) as 1.0/cos(x)
when flag_unsafe_math_optimizations is set and
we don't care about NaNs or Infinities.
diff --git a/gcc/config/i386/xmmintrin.h b/gcc/config/i386/xmmintrin.h
index b80d6b5..fb4d38c 100644
--- a/gcc/config/i386/xmmintrin.h
+++ b/gcc/config/i386/xmmintrin.h
@@ -1197,14 +1197,14 @@ _mm_pause (void)
#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
do { \
__v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3); \
- __v4sf __t0 = __builtin_ia32_shufps (__r0, __r1, 0x44); \
- __v4sf __t2 = __builtin_ia32_shufps (__r0, __r1, 0xEE); \
- __v4sf __t1 = __builtin_ia32_shufps (__r2, __r3, 0x44); \
- __v4sf __t3 = __builtin_ia32_shufps (__r2, __r3, 0xEE); \
- (row0) = __builtin_ia32_shufps (__t0, __t1, 0x88); \
- (row1) = __builtin_ia32_shufps (__t0, __t1, 0xDD); \
- (row2) = __builtin_ia32_shufps (__t2, __t3, 0x88); \
- (row3) = __builtin_ia32_shufps (__t2, __t3, 0xDD); \
+ __v4sf __t0 = __builtin_ia32_unpcklps (__r0, __r1); \
+ __v4sf __t1 = __builtin_ia32_unpcklps (__r2, __r3); \
+ __v4sf __t2 = __builtin_ia32_unpckhps (__r0, __r1); \
+ __v4sf __t3 = __builtin_ia32_unpckhps (__r2, __r3); \
+ (row0) = __builtin_ia32_movlhps (__t0, __t1); \
+ (row1) = __builtin_ia32_movhlps (__t1, __t0); \
+ (row2) = __builtin_ia32_movlhps (__t2, __t3); \
+ (row3) = __builtin_ia32_movhlps (__t3, __t2); \
} while (0)
/* For backward source compatibility. */
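A small usage example follows. The function name and data layout are hypothetical and not from the commit; it simply shows the macro transposing a row-major 4x4 float matrix in place, loaded and stored with the unaligned SSE intrinsics.

#include <xmmintrin.h>

/* Hypothetical example: transpose a row-major 4x4 float matrix in place
   using _MM_TRANSPOSE4_PS (compile with SSE enabled, e.g. -msse).  */
void
transpose_matrix (float m[4][4])
{
  __m128 row0 = _mm_loadu_ps (m[0]);
  __m128 row1 = _mm_loadu_ps (m[1]);
  __m128 row2 = _mm_loadu_ps (m[2]);
  __m128 row3 = _mm_loadu_ps (m[3]);

  _MM_TRANSPOSE4_PS (row0, row1, row2, row3);

  _mm_storeu_ps (m[0], row0);
  _mm_storeu_ps (m[1], row1);
  _mm_storeu_ps (m[2], row2);
  _mm_storeu_ps (m[3], row3);
}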