about summary refs log tree commit diff
path: root/gcc/config
diff options
context:
space:
mode:
authorUros Bizjak <ubizjak@gmail.com>2007-05-20 15:41:13 +0200
committerUros Bizjak <uros@gcc.gnu.org>2007-05-20 15:41:13 +0200
commit46fb8f6ba3b4fa87b1b335d574da744d74cdbbc8 (patch)
tree2f6af58ae79046e8d919e7191044bde1d1bc122a /gcc/config
parentdde27bba7d11284185d1d0e97bf7dc0d302246dc (diff)
downloadgcc-46fb8f6ba3b4fa87b1b335d574da744d74cdbbc8.zip
gcc-46fb8f6ba3b4fa87b1b335d574da744d74cdbbc8.tar.gz
gcc-46fb8f6ba3b4fa87b1b335d574da744d74cdbbc8.tar.bz2
tmmintrin.h (_mm_alignr_epi32): Implement as always inlined function, not as a macro.
* config/i386/tmmintrin.h (_mm_alignr_epi32): Implement as always
  inlined function, not as a macro.
  (_mm_alignr_pi8): Ditto.
* config/i386/ammintrin.h (_mm_extracti_si64): Ditto.
  (_mm_inserti_si64): Ditto.
* config/i386/emmintrin.h (_mm_shuffle_pd): Ditto.
  (_mm_extract_epi16): Ditto.
  (_mm_insert_epi16): Ditto.
  (_mm_shufflehi_epi16): Ditto.
  (_mm_shufflelo_epi16): Ditto.
  (_mm_shuffle_epi32): Ditto.
* config/i386/xmmintrin.h (_mm_set_ss): Use 0.0f for float constant.
* config/386/mm3dnow.h: Add __attribute__((__always_inline__)) to
  all functions.
  (_m_from_float): Add __extension__ to conversion. Use 0.0f for
  float constant.
  (_m_to_float): Use C89 compatible assignment.

testsuite/ChangeLog:

* gcc.target/i386/sse-vect-types.c: Revert 'Use "-msse" instead of
  "-msse2".'
* gcc.target/i386/sse-12.c: Use "-march=k8 -m3dnow -mssse3 -msse4a"
  instead of "-msse3". Include only ammintrin.h, tmmintrin.h and
  mm3dnow.h.
* gcc.target/i386/sse-13.c (__builtin_ia32_extrqi): Redefine to test
  with immediate operand.
  (__builtin_ia32_insertqi): Ditto.
  (__builtin_ia32_palignr128): Ditto.
  (__builtin_ia32_palignr): Ditto.
  (__builtin_ia32_pshufhw): Ditto.
  (__builtin_ia32_pshuflw): Ditto.
  (__builtin_ia32_pshufd): Ditto.
  (__builtin_ia32_vec_set_v8hi): Ditto.
  (__builtin_ia32_vec_ext_v8hi): Ditto.
  (__builtin_ia32_shufpd): Ditto.
* gcc.target/i386/sse-14.c: Same changes as sse-13.c.

From-SVN: r124873
Diffstat (limited to 'gcc/config')
-rw-r--r--gcc/config/i386/ammintrin.h14
-rw-r--r--gcc/config/i386/emmintrin.h39
-rw-r--r--gcc/config/i386/mm3dnow.h63
-rw-r--r--gcc/config/i386/tmmintrin.h13
-rw-r--r--gcc/config/i386/xmmintrin.h2
5 files changed, 77 insertions, 54 deletions
diff --git a/gcc/config/i386/ammintrin.h b/gcc/config/i386/ammintrin.h
index 869c288..51eaefd 100644
--- a/gcc/config/i386/ammintrin.h
+++ b/gcc/config/i386/ammintrin.h
@@ -55,8 +55,11 @@ _mm_extract_si64 (__m128i __X, __m128i __Y)
return (__m128i) __builtin_ia32_extrq ((__v2di) __X, (__v16qi) __Y);
}
-#define _mm_extracti_si64(X, I, L) \
-((__m128i) __builtin_ia32_extrqi ((__v2di)(X), I, L))
+static __inline __m128i __attribute__((__always_inline__))
+_mm_extracti_si64 (__m128i __X, unsigned const int __I, unsigned const int __L)
+{
+ return (__m128i) __builtin_ia32_extrqi ((__v2di) __X, __I, __L);
+}
static __inline __m128i __attribute__((__always_inline__))
_mm_insert_si64 (__m128i __X,__m128i __Y)
@@ -64,8 +67,11 @@ _mm_insert_si64 (__m128i __X,__m128i __Y)
return (__m128i) __builtin_ia32_insertq ((__v2di)__X, (__v2di)__Y);
}
-#define _mm_inserti_si64(X, Y, I, L) \
-((__m128i) __builtin_ia32_insertqi ((__v2di)(X), (__v2di)(Y), I, L))
+static __inline __m128i __attribute__((__always_inline__))
+_mm_inserti_si64(__m128i __X, __m128i __Y, unsigned const int __I, unsigned const int __L)
+{
+ return (__m128i) __builtin_ia32_insertqi ((__v2di)__X, (__v2di)__Y, __I, __L);
+}
#endif /* __SSE4A__ */
diff --git a/gcc/config/i386/emmintrin.h b/gcc/config/i386/emmintrin.h
index e8ef024..f878728 100644
--- a/gcc/config/i386/emmintrin.h
+++ b/gcc/config/i386/emmintrin.h
@@ -57,7 +57,7 @@ typedef double __m128d __attribute__ ((__vector_size__ (16), __may_alias__));
static __inline __m128d __attribute__((__always_inline__))
_mm_set_sd (double __F)
{
- return __extension__ (__m128d){ __F, 0 };
+ return __extension__ (__m128d){ __F, 0.0 };
}
/* Create a vector with both elements equal to F. */
@@ -880,7 +880,11 @@ _mm_cvtss_sd (__m128d __A, __m128 __B)
return (__m128d)__builtin_ia32_cvtss2sd ((__v2df) __A, (__v4sf)__B);
}
-#define _mm_shuffle_pd(__A, __B, __C) ((__m128d)__builtin_ia32_shufpd ((__v2df)__A, (__v2df)__B, (__C)))
+static __inline __m128d __attribute__((__always_inline__))
+_mm_shuffle_pd(__m128d __A, __m128d __B, const int __mask)
+{
+ return (__m128d)__builtin_ia32_shufpd ((__v2df)__A, (__v2df)__B, __mask);
+}
static __inline __m128d __attribute__((__always_inline__))
_mm_unpackhi_pd (__m128d __A, __m128d __B)
@@ -1137,13 +1141,13 @@ _mm_srai_epi32 (__m128i __A, const int __B)
static __inline __m128i __attribute__((__always_inline__))
_mm_srli_si128 (__m128i __A, const int __B)
{
- return ((__m128i)__builtin_ia32_psrldqi128 (__A, __B * 8));
+ return (__m128i)__builtin_ia32_psrldqi128 (__A, __B * 8);
}
static __inline __m128i __attribute__((__always_inline__))
_mm_slli_si128 (__m128i __A, const int __B)
{
- return ((__m128i)__builtin_ia32_pslldqi128 (__A, __B * 8));
+ return (__m128i)__builtin_ia32_pslldqi128 (__A, __B * 8);
}
static __inline __m128i __attribute__((__always_inline__))
@@ -1290,7 +1294,6 @@ _mm_cmpgt_epi32 (__m128i __A, __m128i __B)
return (__m128i)__builtin_ia32_pcmpgtd128 ((__v4si)__A, (__v4si)__B);
}
-#if 0
static __inline int __attribute__((__always_inline__))
_mm_extract_epi16 (__m128i const __A, int const __N)
{
@@ -1302,12 +1305,6 @@ _mm_insert_epi16 (__m128i const __A, int const __D, int const __N)
{
return (__m128i) __builtin_ia32_vec_set_v8hi ((__v8hi)__A, __D, __N);
}
-#else
-#define _mm_extract_epi16(A, N) \
- ((int) __builtin_ia32_vec_ext_v8hi ((__v8hi)(A), (N)))
-#define _mm_insert_epi16(A, D, N) \
- ((__m128i) __builtin_ia32_vec_set_v8hi ((__v8hi)(A), (D), (N)))
-#endif
static __inline __m128i __attribute__((__always_inline__))
_mm_max_epi16 (__m128i __A, __m128i __B)
@@ -1345,9 +1342,23 @@ _mm_mulhi_epu16 (__m128i __A, __m128i __B)
return (__m128i)__builtin_ia32_pmulhuw128 ((__v8hi)__A, (__v8hi)__B);
}
-#define _mm_shufflehi_epi16(__A, __B) ((__m128i)__builtin_ia32_pshufhw ((__v8hi)__A, __B))
-#define _mm_shufflelo_epi16(__A, __B) ((__m128i)__builtin_ia32_pshuflw ((__v8hi)__A, __B))
-#define _mm_shuffle_epi32(__A, __B) ((__m128i)__builtin_ia32_pshufd ((__v4si)__A, __B))
+static __inline __m128i __attribute__((__always_inline__))
+_mm_shufflehi_epi16 (__m128i __A, const int __mask)
+{
+ return (__m128i)__builtin_ia32_pshufhw ((__v8hi)__A, __mask);
+}
+
+static __inline __m128i __attribute__((__always_inline__))
+_mm_shufflelo_epi16 (__m128i __A, const int __mask)
+{
+ return (__m128i)__builtin_ia32_pshuflw ((__v8hi)__A, __mask);
+}
+
+static __inline __m128i __attribute__((__always_inline__))
+_mm_shuffle_epi32 (__m128i __A, const int __mask)
+{
+ return (__m128i)__builtin_ia32_pshufd ((__v4si)__A, __mask);
+}
static __inline void __attribute__((__always_inline__))
_mm_maskmoveu_si128 (__m128i __A, __m128i __B, char *__C)
diff --git a/gcc/config/i386/mm3dnow.h b/gcc/config/i386/mm3dnow.h
index 7fdc6dc..fc5b35c 100644
--- a/gcc/config/i386/mm3dnow.h
+++ b/gcc/config/i386/mm3dnow.h
@@ -37,178 +37,179 @@
/* Internal data types for implementing the intrinsics. */
typedef float __v2sf __attribute__ ((__vector_size__ (8)));
-static __inline void
+static __inline void __attribute__((__always_inline__))
_m_femms (void)
{
__builtin_ia32_femms();
}
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
_m_pavgusb (__m64 __A, __m64 __B)
{
return (__m64)__builtin_ia32_pavgusb ((__v8qi)__A, (__v8qi)__B);
}
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
_m_pf2id (__m64 __A)
{
return (__m64)__builtin_ia32_pf2id ((__v2sf)__A);
}
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
_m_pfacc (__m64 __A, __m64 __B)
{
return (__m64)__builtin_ia32_pfacc ((__v2sf)__A, (__v2sf)__B);
}
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
_m_pfadd (__m64 __A, __m64 __B)
{
return (__m64)__builtin_ia32_pfadd ((__v2sf)__A, (__v2sf)__B);
}
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
_m_pfcmpeq (__m64 __A, __m64 __B)
{
return (__m64)__builtin_ia32_pfcmpeq ((__v2sf)__A, (__v2sf)__B);
}
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
_m_pfcmpge (__m64 __A, __m64 __B)
{
return (__m64)__builtin_ia32_pfcmpge ((__v2sf)__A, (__v2sf)__B);
}
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
_m_pfcmpgt (__m64 __A, __m64 __B)
{
return (__m64)__builtin_ia32_pfcmpgt ((__v2sf)__A, (__v2sf)__B);
}
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
_m_pfmax (__m64 __A, __m64 __B)
{
return (__m64)__builtin_ia32_pfmax ((__v2sf)__A, (__v2sf)__B);
}
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
_m_pfmin (__m64 __A, __m64 __B)
{
return (__m64)__builtin_ia32_pfmin ((__v2sf)__A, (__v2sf)__B);
}
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
_m_pfmul (__m64 __A, __m64 __B)
{
return (__m64)__builtin_ia32_pfmul ((__v2sf)__A, (__v2sf)__B);
}
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
_m_pfrcp (__m64 __A)
{
return (__m64)__builtin_ia32_pfrcp ((__v2sf)__A);
}
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
_m_pfrcpit1 (__m64 __A, __m64 __B)
{
return (__m64)__builtin_ia32_pfrcpit1 ((__v2sf)__A, (__v2sf)__B);
}
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
_m_pfrcpit2 (__m64 __A, __m64 __B)
{
return (__m64)__builtin_ia32_pfrcpit2 ((__v2sf)__A, (__v2sf)__B);
}
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
_m_pfrsqrt (__m64 __A)
{
return (__m64)__builtin_ia32_pfrsqrt ((__v2sf)__A);
}
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
_m_pfrsqit1 (__m64 __A, __m64 __B)
{
return (__m64)__builtin_ia32_pfrsqit1 ((__v2sf)__A, (__v2sf)__B);
}
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
_m_pfsub (__m64 __A, __m64 __B)
{
return (__m64)__builtin_ia32_pfsub ((__v2sf)__A, (__v2sf)__B);
}
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
_m_pfsubr (__m64 __A, __m64 __B)
{
return (__m64)__builtin_ia32_pfsubr ((__v2sf)__A, (__v2sf)__B);
}
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
_m_pi2fd (__m64 __A)
{
return (__m64)__builtin_ia32_pi2fd ((__v2si)__A);
}
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
_m_pmulhrw (__m64 __A, __m64 __B)
{
return (__m64)__builtin_ia32_pmulhrw ((__v4hi)__A, (__v4hi)__B);
}
-static __inline void
+static __inline void __attribute__((__always_inline__))
_m_prefetch (void *__P)
{
__builtin_prefetch (__P, 0, 3 /* _MM_HINT_T0 */);
}
-static __inline void
+static __inline void __attribute__((__always_inline__))
_m_prefetchw (void *__P)
{
__builtin_prefetch (__P, 1, 3 /* _MM_HINT_T0 */);
}
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
_m_from_float (float __A)
{
- return (__m64)(__v2sf){ __A, 0 };
+ return __extension__ (__m64)(__v2sf){ __A, 0.0f };
}
-static __inline float
+static __inline float __attribute__((__always_inline__))
_m_to_float (__m64 __A)
{
- union { __v2sf v; float a[2]; } __tmp = { (__v2sf)__A };
+ union { __v2sf v; float a[2]; } __tmp;
+ __tmp.v = (__v2sf)__A;
return __tmp.a[0];
}
#ifdef __3dNOW_A__
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
_m_pf2iw (__m64 __A)
{
return (__m64)__builtin_ia32_pf2iw ((__v2sf)__A);
}
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
_m_pfnacc (__m64 __A, __m64 __B)
{
return (__m64)__builtin_ia32_pfnacc ((__v2sf)__A, (__v2sf)__B);
}
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
_m_pfpnacc (__m64 __A, __m64 __B)
{
return (__m64)__builtin_ia32_pfpnacc ((__v2sf)__A, (__v2sf)__B);
}
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
_m_pi2fw (__m64 __A)
{
return (__m64)__builtin_ia32_pi2fw ((__v2si)__A);
}
-static __inline __m64
+static __inline __m64 __attribute__((__always_inline__))
_m_pswapd (__m64 __A)
{
return (__m64)__builtin_ia32_pswapdsf ((__v2sf)__A);
diff --git a/gcc/config/i386/tmmintrin.h b/gcc/config/i386/tmmintrin.h
index cf9d99d..dbcfbd0 100644
--- a/gcc/config/i386/tmmintrin.h
+++ b/gcc/config/i386/tmmintrin.h
@@ -181,11 +181,16 @@ _mm_sign_pi32 (__m64 __X, __m64 __Y)
return (__m64) __builtin_ia32_psignd ((__v2si)__X, (__v2si)__Y);
}
-#define _mm_alignr_epi8(__X, __Y, __N) \
- ((__m128i)__builtin_ia32_palignr128 ((__v2di) __X, (__v2di) __Y, (__N) * 8))
+static __inline __m128i __attribute__((__always_inline__))
+_mm_alignr_epi8(__m128i __X, __m128i __Y, const int __N)
+{
+ return (__m128i)__builtin_ia32_palignr128 ((__v2di)__X, (__v2di)__Y, __N * 8);}
-#define _mm_alignr_pi8(__X, __Y, __N) \
- ((__m64)__builtin_ia32_palignr ((long long) (__X), (long long) (__Y), (__N) * 8))
+static __inline __m64 __attribute__((__always_inline__))
+_mm_alignr_pi8(__m64 __X, __m64 __Y, const int __N)
+{
+ return (__m64)__builtin_ia32_palignr ((long long)__X, (long long)__Y, __N * 8);
+}
static __inline __m128i __attribute__((__always_inline__))
_mm_abs_epi8 (__m128i __X)
diff --git a/gcc/config/i386/xmmintrin.h b/gcc/config/i386/xmmintrin.h
index ac3a59a..3716daa 100644
--- a/gcc/config/i386/xmmintrin.h
+++ b/gcc/config/i386/xmmintrin.h
@@ -855,7 +855,7 @@ _MM_SET_FLUSH_ZERO_MODE (unsigned int __mode)
static __inline __m128 __attribute__((__always_inline__))
_mm_set_ss (float __F)
{
- return __extension__ (__m128)(__v4sf){ __F, 0, 0, 0 };
+ return __extension__ (__m128)(__v4sf){ __F, 0.0f, 0.0f, 0.0f };
}
/* Create a vector with all four elements equal to F. */