From eb701deb655a1ae74c35f2719d33c701544c7ccd Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Tue, 11 Jan 2005 13:33:14 -0800
Subject: re PR rtl-optimization/13366 (ICE using MMX/SSE builtins with -O)

	PR target/13366
	* config/i386/i386.h (enum ix86_builtins): Move ...
	* config/i386/i386.c: ... here.
	(IX86_BUILTIN_MOVDDUP, IX86_BUILTIN_MMX_ZERO, IX86_BUILTIN_PEXTRW,
	IX86_BUILTIN_PINSRW, IX86_BUILTIN_LOADAPS, IX86_BUILTIN_LOADSS,
	IX86_BUILTIN_STORESS, IX86_BUILTIN_SSE_ZERO, IX86_BUILTIN_PEXTRW128,
	IX86_BUILTIN_PINSRW128, IX86_BUILTIN_LOADAPD, IX86_BUILTIN_LOADSD,
	IX86_BUILTIN_STOREAPD, IX86_BUILTIN_STORESD, IX86_BUILTIN_STOREHPD,
	IX86_BUILTIN_STORELPD, IX86_BUILTIN_SETPD1, IX86_BUILTIN_SETPD,
	IX86_BUILTIN_CLRPD, IX86_BUILTIN_LOADPD1, IX86_BUILTIN_LOADRPD,
	IX86_BUILTIN_STOREPD1, IX86_BUILTIN_STORERPD, IX86_BUILTIN_LOADDQA,
	IX86_BUILTIN_STOREDQA, IX86_BUILTIN_CLRTI,
	IX86_BUILTIN_LOADDDUP): Remove.
	(IX86_BUILTIN_VEC_INIT_V2SI, IX86_BUILTIN_VEC_INIT_V4HI,
	IX86_BUILTIN_VEC_INIT_V8QI, IX86_BUILTIN_VEC_EXT_V2DF,
	IX86_BUILTIN_VEC_EXT_V2DI, IX86_BUILTIN_VEC_EXT_V4SF,
	IX86_BUILTIN_VEC_EXT_V8HI, IX86_BUILTIN_VEC_EXT_V4HI,
	IX86_BUILTIN_VEC_SET_V8HI, IX86_BUILTIN_VEC_SET_V4HI): New.
	(ix86_init_builtins): Make static.
	(ix86_init_mmx_sse_builtins): Update for changed builtins.
	(ix86_expand_binop_builtin): Only use ix86_fixup_binary_operands
	if all the modes match.  Otherwise, fake it.
	(get_element_number, ix86_expand_vec_init_builtin,
	ix86_expand_vec_ext_builtin, ix86_expand_vec_set_builtin): New.
	(ix86_expand_builtin): Make static.  Update for changed builtins.
	(ix86_expand_vector_move_misalign): Use sse2_loadlpd with zero
	operand instead of sse2_loadsd.  Cast sse1 fallback to V4SFmode.
	(ix86_expand_vector_init_duplicate): New.
	(ix86_expand_vector_init_low_nonzero): New.
	(ix86_expand_vector_init_one_var, ix86_expand_vector_init_general):
	Split out from ix86_expand_vector_init; handle integer modes.
	(ix86_expand_vector_init): Use them.
	(ix86_expand_vector_set, ix86_expand_vector_extract): New.
	* config/i386/i386-protos.h: Update.
	* config/i386/predicates.md (reg_or_0_operand): New.
	* config/i386/mmx.md (mov_internal): Add 'r' variants.
	(movv2sf_internal): Likewise.  And a splitter to match them all.
	(vec_dupv2sf, mmx_concatv2sf, vec_setv2sf, vec_extractv2sf,
	vec_initv2sf, vec_dupv4hi, vec_dupv2si, mmx_concatv2si, vec_setv2si,
	vec_extractv2si, vec_initv2si, vec_setv4hi, vec_extractv4hi,
	vec_initv4hi, vec_setv8qi, vec_extractv8qi, vec_initv8qi): New.
	(mmx_pinsrw): Fix operand ordering.
	* config/i386/sse.md (movv4sf splitter): Use direct pattern,
	rather than sse_loadss expander.
	(movv2df splitter): Similarly.
	(sse_loadss, sse_loadlss): Remove.
	(vec_dupv4sf, sse_concatv2sf, sse_concatv4sf, vec_extractv4sf_0): New.
	(vec_setv4sf, vec_setv2df): Use ix86_expand_vector_set.
	(vec_extractv4sf, vec_extractv2df): Use ix86_expand_vector_extract.
	(sse3_movddup): Rename with '*'.
	(sse3_movddup splitter): Use gen_rtx_REG instead of gen_lowpart.
	(sse2_loadsd): Remove.
	(vec_dupv2df_sse3): Rename from sse3_loadddup.
	(vec_dupv2df, vec_concatv2df_sse3, vec_concatv2df): New.
	(sse2_pinsrw): Fix argument ordering.
	(sse2_loadld, sse2_loadq): Add sse1 alternatives.
	(sse2_stored): Remove 'r' destination.
	(vec_dupv4si, vec_dupv2di, sse2_concatv2si, sse1_concatv2si,
	vec_concatv4si_1, vec_concatv2di, vec_setv2di, vec_extractv2di,
	vec_initv2di, vec_setv4si, vec_extractv4si, vec_initv4si,
	vec_setv8hi, vec_extractv8hi, vec_initv8hi, vec_setv16qi,
	vec_extractv16qi, vec_initv16qi): New.

	* config/i386/emmintrin.h (__m128i, __m128d): Use typedef, not define.
	(_mm_set_sd, _mm_set1_pd, _mm_setzero_pd, _mm_set_epi64x,
	_mm_set_epi32, _mm_set_epi16, _mm_set_epi8, _mm_setzero_si128): Use
	constructor form.
	(_mm_load_pd, _mm_store_pd): Use plain dereference.
	(_mm_load_si128, _mm_store_si128): Likewise.
	(_mm_load1_pd): Use _mm_set1_pd.
	(_mm_load_sd): Use _mm_set_sd.
	(_mm_store_sd, _mm_storeh_pd): Use __builtin_ia32_vec_ext_v2df.
	(_mm_store1_pd, _mm_storer_pd): Use _mm_store_pd.
	(_mm_set_epi64): Use _mm_set_epi64x.
	(_mm_set1_epi64x, _mm_set1_epi64, _mm_set1_epi32, _mm_set_epi16,
	_mm_set1_epi8, _mm_setr_epi64, _mm_setr_epi32, _mm_setr_epi16,
	_mm_setr_epi8): Use _mm_set_foo form.
	(_mm_loadl_epi64, _mm_movpi64_epi64, _mm_move_epi64): Use
	_mm_set_epi64.
	(_mm_storel_epi64, _mm_movepi64_pi64): Use __builtin_ia32_vec_ext_v2di.
	(_mm_extract_epi16): Use __builtin_ia32_vec_ext_v8hi.
	(_mm_insert_epi16): Use __builtin_ia32_vec_set_v8hi.
	* config/i386/mmintrin.h (_mm_setzero_si64): Use plain cast.
	(_mm_set_pi32): Use __builtin_ia32_vec_init_v2si.
	(_mm_set_pi16): Use __builtin_ia32_vec_init_v4hi.
	(_mm_set_pi8): Use __builtin_ia32_vec_init_v8qi.
	(_mm_set1_pi16, _mm_set1_pi8): Use _mm_set_piN variant.
	* config/i386/pmmintrin.h (_mm_loaddup_pd): Use _mm_load1_pd.
	(_mm_movedup_pd): Use _mm_shuffle_pd.
	* config/i386/xmmintrin.h (_mm_setzero_ps, _mm_set_ss,
	_mm_set1_ps, _mm_set_ps, _mm_setr_ps): Use constructor form.
	(_mm_cvtpi16_ps, _mm_cvtpu16_ps, _mm_cvtpi8_ps, _mm_cvtpu8_ps,
	_mm_cvtps_pi8, _mm_cvtpi32x2_ps): Avoid __builtin_ia32_mmx_zero;
	Use _mm_setzero_ps.
	(_mm_load_ss, _mm_load1_ps): Use _mm_set* form.
	(_mm_load_ps, _mm_loadr_ps): Use raw dereference.
	(_mm_store_ss): Use __builtin_ia32_vec_ext_v4sf.
	(_mm_store_ps): Use raw dereference.
	(_mm_store1_ps): Use _mm_storeu_ps.
	(_mm_storer_ps): Use _mm_store_ps.
	(_mm_extract_pi16): Use __builtin_ia32_vec_ext_v4hi.
	(_mm_insert_pi16): Use __builtin_ia32_vec_set_v4hi.
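Throughout this patch, "constructor form" means GCC's vector-extension
initializer syntax: an __m128d value can be written directly as a braced
literal, so the intrinsics no longer round-trip through load/shuffle
builtins.  Two of the rewritten bodies from the emmintrin.h hunks below
show the idiom:

    static __inline __m128d
    _mm_set_pd (double __W, double __X)
    {
      return (__m128d){ __X, __W };
    }

    static __inline __m128d
    _mm_setzero_pd (void)
    {
      return (__m128d){ 0.0, 0.0 };
    }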
From-SVN: r93199
---
 gcc/ChangeLog                           |  104 ++
 gcc/config/i386/emmintrin.h             |  439 ++++-----
 gcc/config/i386/i386-protos.h           |    7 +-
 gcc/config/i386/i386.c                  | 1596 +++++++++++++++++++++++++------
 gcc/config/i386/i386.h                  |  458 ---------
 gcc/config/i386/mmintrin.h              |   44 +-
 gcc/config/i386/mmx.md                  |  215 ++++-
 gcc/config/i386/pmmintrin.h             |    4 +-
 gcc/config/i386/predicates.md           |    5 +
 gcc/config/i386/sse.md                  |  562 +++++----
 gcc/config/i386/xmmintrin.h             |  146 ++-
 gcc/testsuite/gcc.target/i386/pr13366.c |   13 +
 12 files changed, 2249 insertions(+), 1344 deletions(-)
 create mode 100644 gcc/testsuite/gcc.target/i386/pr13366.c

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index ded748b..d57a3b0 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,107 @@
+2005-01-11  Richard Henderson
+
+	PR target/13366
+	* config/i386/i386.h (enum ix86_builtins): Move ...
+	* config/i386/i386.c: ... here.
+	(IX86_BUILTIN_MOVDDUP, IX86_BUILTIN_MMX_ZERO, IX86_BUILTIN_PEXTRW,
+	IX86_BUILTIN_PINSRW, IX86_BUILTIN_LOADAPS, IX86_BUILTIN_LOADSS,
+	IX86_BUILTIN_STORESS, IX86_BUILTIN_SSE_ZERO, IX86_BUILTIN_PEXTRW128,
+	IX86_BUILTIN_PINSRW128, IX86_BUILTIN_LOADAPD, IX86_BUILTIN_LOADSD,
+	IX86_BUILTIN_STOREAPD, IX86_BUILTIN_STORESD, IX86_BUILTIN_STOREHPD,
+	IX86_BUILTIN_STORELPD, IX86_BUILTIN_SETPD1, IX86_BUILTIN_SETPD,
+	IX86_BUILTIN_CLRPD, IX86_BUILTIN_LOADPD1, IX86_BUILTIN_LOADRPD,
+	IX86_BUILTIN_STOREPD1, IX86_BUILTIN_STORERPD, IX86_BUILTIN_LOADDQA,
+	IX86_BUILTIN_STOREDQA, IX86_BUILTIN_CLRTI,
+	IX86_BUILTIN_LOADDDUP): Remove.
+	(IX86_BUILTIN_VEC_INIT_V2SI, IX86_BUILTIN_VEC_INIT_V4HI,
+	IX86_BUILTIN_VEC_INIT_V8QI, IX86_BUILTIN_VEC_EXT_V2DF,
+	IX86_BUILTIN_VEC_EXT_V2DI, IX86_BUILTIN_VEC_EXT_V4SF,
+	IX86_BUILTIN_VEC_EXT_V8HI, IX86_BUILTIN_VEC_EXT_V4HI,
+	IX86_BUILTIN_VEC_SET_V8HI, IX86_BUILTIN_VEC_SET_V4HI): New.
+	(ix86_init_builtins): Make static.
+	(ix86_init_mmx_sse_builtins): Update for changed builtins.
+	(ix86_expand_binop_builtin): Only use ix86_fixup_binary_operands
+	if all the modes match.  Otherwise, fake it.
+	(get_element_number, ix86_expand_vec_init_builtin,
+	ix86_expand_vec_ext_builtin, ix86_expand_vec_set_builtin): New.
+	(ix86_expand_builtin): Make static.  Update for changed builtins.
+	(ix86_expand_vector_move_misalign): Use sse2_loadlpd with zero
+	operand instead of sse2_loadsd.  Cast sse1 fallback to V4SFmode.
+	(ix86_expand_vector_init_duplicate): New.
+	(ix86_expand_vector_init_low_nonzero): New.
+	(ix86_expand_vector_init_one_var, ix86_expand_vector_init_general):
+	Split out from ix86_expand_vector_init; handle integer modes.
+	(ix86_expand_vector_init): Use them.
+	(ix86_expand_vector_set, ix86_expand_vector_extract): New.
+	* config/i386/i386-protos.h: Update.
+	* config/i386/predicates.md (reg_or_0_operand): New.
+	* config/i386/mmx.md (mov_internal): Add 'r' variants.
+	(movv2sf_internal): Likewise.  And a splitter to match them all.
+	(vec_dupv2sf, mmx_concatv2sf, vec_setv2sf, vec_extractv2sf,
+	vec_initv2sf, vec_dupv4hi, vec_dupv2si, mmx_concatv2si, vec_setv2si,
+	vec_extractv2si, vec_initv2si, vec_setv4hi, vec_extractv4hi,
+	vec_initv4hi, vec_setv8qi, vec_extractv8qi, vec_initv8qi): New.
+	(mmx_pinsrw): Fix operand ordering.
+	* config/i386/sse.md (movv4sf splitter): Use direct pattern,
+	rather than sse_loadss expander.
+	(movv2df splitter): Similarly.
+	(sse_loadss, sse_loadlss): Remove.
+	(vec_dupv4sf, sse_concatv2sf, sse_concatv4sf, vec_extractv4sf_0): New.
+	(vec_setv4sf, vec_setv2df): Use ix86_expand_vector_set.
+	(vec_extractv4sf, vec_extractv2df): Use ix86_expand_vector_extract.
+	(sse3_movddup): Rename with '*'.
+	(sse3_movddup splitter): Use gen_rtx_REG instead of gen_lowpart.
+	(sse2_loadsd): Remove.
+	(vec_dupv2df_sse3): Rename from sse3_loadddup.
+	(vec_dupv2df, vec_concatv2df_sse3, vec_concatv2df): New.
+	(sse2_pinsrw): Fix argument ordering.
+	(sse2_loadld, sse2_loadq): Add sse1 alternatives.
+	(sse2_stored): Remove 'r' destination.
+	(vec_dupv4si, vec_dupv2di, sse2_concatv2si, sse1_concatv2si,
+	vec_concatv4si_1, vec_concatv2di, vec_setv2di, vec_extractv2di,
+	vec_initv2di, vec_setv4si, vec_extractv4si, vec_initv4si,
+	vec_setv8hi, vec_extractv8hi, vec_initv8hi, vec_setv16qi,
+	vec_extractv16qi, vec_initv16qi): New.
+
+	* config/i386/emmintrin.h (__m128i, __m128d): Use typedef, not define.
+	(_mm_set_sd, _mm_set1_pd, _mm_setzero_pd, _mm_set_epi64x,
+	_mm_set_epi32, _mm_set_epi16, _mm_set_epi8, _mm_setzero_si128): Use
+	constructor form.
+	(_mm_load_pd, _mm_store_pd): Use plain dereference.
+	(_mm_load_si128, _mm_store_si128): Likewise.
+	(_mm_load1_pd): Use _mm_set1_pd.
+	(_mm_load_sd): Use _mm_set_sd.
+	(_mm_store_sd, _mm_storeh_pd): Use __builtin_ia32_vec_ext_v2df.
+	(_mm_store1_pd, _mm_storer_pd): Use _mm_store_pd.
+	(_mm_set_epi64): Use _mm_set_epi64x.
+	(_mm_set1_epi64x, _mm_set1_epi64, _mm_set1_epi32, _mm_set_epi16,
+	_mm_set1_epi8, _mm_setr_epi64, _mm_setr_epi32, _mm_setr_epi16,
+	_mm_setr_epi8): Use _mm_set_foo form.
+	(_mm_loadl_epi64, _mm_movpi64_epi64, _mm_move_epi64): Use
+	_mm_set_epi64.
+	(_mm_storel_epi64, _mm_movepi64_pi64): Use __builtin_ia32_vec_ext_v2di.
+	(_mm_extract_epi16): Use __builtin_ia32_vec_ext_v8hi.
+	(_mm_insert_epi16): Use __builtin_ia32_vec_set_v8hi.
+	* config/i386/mmintrin.h (_mm_setzero_si64): Use plain cast.
+	(_mm_set_pi32): Use __builtin_ia32_vec_init_v2si.
+	(_mm_set_pi16): Use __builtin_ia32_vec_init_v4hi.
+	(_mm_set_pi8): Use __builtin_ia32_vec_init_v8qi.
+	(_mm_set1_pi16, _mm_set1_pi8): Use _mm_set_piN variant.
+	* config/i386/pmmintrin.h (_mm_loaddup_pd): Use _mm_load1_pd.
+	(_mm_movedup_pd): Use _mm_shuffle_pd.
+	* config/i386/xmmintrin.h (_mm_setzero_ps, _mm_set_ss,
+	_mm_set1_ps, _mm_set_ps, _mm_setr_ps): Use constructor form.
+	(_mm_cvtpi16_ps, _mm_cvtpu16_ps, _mm_cvtpi8_ps, _mm_cvtpu8_ps,
+	_mm_cvtps_pi8, _mm_cvtpi32x2_ps): Avoid __builtin_ia32_mmx_zero;
+	Use _mm_setzero_ps.
+	(_mm_load_ss, _mm_load1_ps): Use _mm_set* form.
+	(_mm_load_ps, _mm_loadr_ps): Use raw dereference.
+	(_mm_store_ss): Use __builtin_ia32_vec_ext_v4sf.
+	(_mm_store_ps): Use raw dereference.
+	(_mm_store1_ps): Use _mm_storeu_ps.
+	(_mm_storer_ps): Use _mm_store_ps.
+	(_mm_extract_pi16): Use __builtin_ia32_vec_ext_v4hi.
+	(_mm_insert_pi16): Use __builtin_ia32_vec_set_v4hi.
+
 2005-01-11  Stan Shebs
 
 	* config/rs6000/rs6000.c (machopic_output_stub): Issue
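The mmintrin.h hunks fall outside this excerpt, but per the entries above,
the MMX set functions now funnel through the new vec_init builtins so the
compiler, not the header, decides how to build the vector.  A sketch of
the shape of that change, reconstructed from the ChangeLog rather than
quoted from the hunk:

    /* Sketch only; the actual mmintrin.h hunk is not shown here.  */
    static __inline __m64
    _mm_set_pi32 (int __i1, int __i0)
    {
      return (__m64) __builtin_ia32_vec_init_v2si (__i1, __i0);
    }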
diff --git a/gcc/config/i386/emmintrin.h b/gcc/config/i386/emmintrin.h
index 2d2b710..aa7b25e 100644
--- a/gcc/config/i386/emmintrin.h
+++ b/gcc/config/i386/emmintrin.h
@@ -40,141 +40,156 @@ typedef int __v4si __attribute__ ((__vector_size__ (16)));
 typedef short __v8hi __attribute__ ((__vector_size__ (16)));
 typedef char __v16qi __attribute__ ((__vector_size__ (16)));
 
+typedef __v2di __m128i;
+typedef __v2df __m128d;
+
 /* Create a selector for use with the SHUFPD instruction.  */
 #define _MM_SHUFFLE2(fp1,fp0) \
  (((fp1) << 1) | (fp0))
 
-#define __m128i __v2di
-#define __m128d __v2df
+/* Create a vector with element 0 as F and the rest zero.  */
+static __inline __m128d
+_mm_set_sd (double __F)
+{
+  return (__m128d){ __F, 0 };
+}
 
-/* Create a vector with element 0 as *P and the rest zero.  */
+/* Create a vector with both elements equal to F.  */
 static __inline __m128d
-_mm_load_sd (double const *__P)
+_mm_set1_pd (double __F)
 {
-  return (__m128d) __builtin_ia32_loadsd (__P);
+  return (__m128d){ __F, __F };
 }
 
-/* Create a vector with all two elements equal to *P.  */
 static __inline __m128d
-_mm_load1_pd (double const *__P)
+_mm_set_pd1 (double __F)
 {
-  __v2df __tmp = __builtin_ia32_loadsd (__P);
-  return (__m128d) __builtin_ia32_shufpd (__tmp, __tmp, _MM_SHUFFLE2 (0,0));
+  return _mm_set1_pd (__F);
 }
 
+/* Create a vector with the lower value X and upper value W.  */
 static __inline __m128d
-_mm_load_pd1 (double const *__P)
+_mm_set_pd (double __W, double __X)
 {
-  return _mm_load1_pd (__P);
+  return (__m128d){ __X, __W };
 }
 
-/* Load two DPFP values from P.  The address must be 16-byte aligned.  */
+/* Create a vector with the lower value W and upper value X.  */
 static __inline __m128d
-_mm_load_pd (double const *__P)
+_mm_setr_pd (double __W, double __X)
 {
-  return (__m128d) __builtin_ia32_loadapd (__P);
+  return (__m128d){ __W, __X };
 }
 
-/* Load two DPFP values from P.  The address need not be 16-byte aligned.  */
+/* Create a vector of zeros.  */
 static __inline __m128d
-_mm_loadu_pd (double const *__P)
+_mm_setzero_pd (void)
 {
-  return (__m128d) __builtin_ia32_loadupd (__P);
+  return (__m128d){ 0.0, 0.0 };
 }
 
-/* Load two DPFP values in reverse order.  The address must be aligned.  */
+/* Sets the low DPFP value of A from the low value of B.  */
 static __inline __m128d
-_mm_loadr_pd (double const *__P)
+_mm_move_sd (__m128d __A, __m128d __B)
 {
-  __v2df __tmp = __builtin_ia32_loadapd (__P);
-  return (__m128d) __builtin_ia32_shufpd (__tmp, __tmp, _MM_SHUFFLE2 (0,1));
+  return (__m128d) __builtin_ia32_movsd ((__v2df)__A, (__v2df)__B);
 }
 
-/* Create a vector with element 0 as F and the rest zero.  */
+/* Load two DPFP values from P.  The address must be 16-byte aligned.  */
 static __inline __m128d
-_mm_set_sd (double __F)
+_mm_load_pd (double const *__P)
 {
-  return (__m128d) __builtin_ia32_loadsd (&__F);
+  return *(__m128d *)__P;
 }
 
-/* Create a vector with all two elements equal to F.  */
+/* Load two DPFP values from P.  The address need not be 16-byte aligned.  */
 static __inline __m128d
-_mm_set1_pd (double __F)
+_mm_loadu_pd (double const *__P)
 {
-  __v2df __tmp = __builtin_ia32_loadsd (&__F);
-  return (__m128d) __builtin_ia32_shufpd (__tmp, __tmp, _MM_SHUFFLE2 (0,0));
+  return __builtin_ia32_loadupd (__P);
 }
 
+/* Create a vector with all two elements equal to *P.  */
 static __inline __m128d
-_mm_set_pd1 (double __F)
+_mm_load1_pd (double const *__P)
 {
-  return _mm_set1_pd (__F);
+  return _mm_set1_pd (*__P);
 }
 
-/* Create the vector [Z Y].  */
+/* Create a vector with element 0 as *P and the rest zero.  */
 static __inline __m128d
-_mm_set_pd (double __Z, double __Y)
+_mm_load_sd (double const *__P)
 {
-  return (__v2df) {__Y, __Z};
+  return _mm_set_sd (*__P);
 }
 
-/* Create the vector [Y Z].  */
 static __inline __m128d
-_mm_setr_pd (double __Z, double __Y)
+_mm_load_pd1 (double const *__P)
 {
-  return _mm_set_pd (__Y, __Z);
+  return _mm_load1_pd (__P);
 }
 
-/* Create a vector of zeros.  */
+/* Load two DPFP values in reverse order.  The address must be aligned.  */
 static __inline __m128d
-_mm_setzero_pd (void)
+_mm_loadr_pd (double const *__P)
 {
-  return (__m128d) __builtin_ia32_setzeropd ();
+  __m128d __tmp = _mm_load_pd (__P);
+  return __builtin_ia32_shufpd (__tmp, __tmp, _MM_SHUFFLE2 (0,1));
+}
+
+/* Store two DPFP values.  The address must be 16-byte aligned.  */
+static __inline void
+_mm_store_pd (double *__P, __m128d __A)
+{
+  *(__m128d *)__P = __A;
+}
+
+/* Store two DPFP values.  The address need not be 16-byte aligned.  */
+static __inline void
+_mm_storeu_pd (double *__P, __m128d __A)
+{
+  __builtin_ia32_storeupd (__P, __A);
 }
 
 /* Stores the lower DPFP value.  */
 static __inline void
 _mm_store_sd (double *__P, __m128d __A)
 {
-  __builtin_ia32_storesd (__P, (__v2df)__A);
+  *__P = __builtin_ia32_vec_ext_v2df (__A, 0);
 }
 
-/* Store the lower DPFP value across two words.  */
 static __inline void
-_mm_store1_pd (double *__P, __m128d __A)
+_mm_storel_pd (double *__P, __m128d __A)
 {
-  __v2df __va = (__v2df)__A;
-  __v2df __tmp = __builtin_ia32_shufpd (__va, __va, _MM_SHUFFLE2 (0,0));
-  __builtin_ia32_storeapd (__P, __tmp);
+  _mm_store_sd (__P, __A);
 }
 
+/* Stores the upper DPFP value.  */
 static __inline void
-_mm_store_pd1 (double *__P, __m128d __A)
+_mm_storeh_pd (double *__P, __m128d __A)
 {
-  _mm_store1_pd (__P, __A);
+  *__P = __builtin_ia32_vec_ext_v2df (__A, 1);
 }
 
-/* Store two DPFP values.  The address must be 16-byte aligned.  */
+/* Store the lower DPFP value across two words.
+   The address must be 16-byte aligned.  */
 static __inline void
-_mm_store_pd (double *__P, __m128d __A)
+_mm_store1_pd (double *__P, __m128d __A)
 {
-  __builtin_ia32_storeapd (__P, (__v2df)__A);
+  _mm_store_pd (__P, __builtin_ia32_shufpd (__A, __A, _MM_SHUFFLE2 (0,0)));
 }
 
-/* Store two DPFP values.  The address need not be 16-byte aligned.  */
 static __inline void
-_mm_storeu_pd (double *__P, __m128d __A)
+_mm_store_pd1 (double *__P, __m128d __A)
 {
-  __builtin_ia32_storeupd (__P, (__v2df)__A);
+  _mm_store1_pd (__P, __A);
 }
 
 /* Store two DPFP values in reverse order.  The address must be aligned.  */
 static __inline void
 _mm_storer_pd (double *__P, __m128d __A)
 {
-  __v2df __va = (__v2df)__A;
-  __v2df __tmp = __builtin_ia32_shufpd (__va, __va, _MM_SHUFFLE2 (0,1));
-  __builtin_ia32_storeapd (__P, __tmp);
+  _mm_store_pd (__P, __builtin_ia32_shufpd (__A, __A, _MM_SHUFFLE2 (0,1)));
 }
 
 static __inline int
@@ -193,13 +208,6 @@ _mm_cvtsi128_si64x (__m128i __A)
 }
 #endif
 
-/* Sets the low DPFP value of A from the low value of B.  */
-static __inline __m128d
-_mm_move_sd (__m128d __A, __m128d __B)
-{
-  return (__m128d) __builtin_ia32_movsd ((__v2df)__A, (__v2df)__B);
-}
-
 static __inline __m128d
 _mm_add_pd (__m128d __A, __m128d __B)
@@ -543,277 +551,171 @@ _mm_ucomineq_sd (__m128d __A, __m128d __B)
   return __builtin_ia32_ucomisdneq ((__v2df)__A, (__v2df)__B);
 }
 
-/* Create a vector with element 0 as *P and the rest zero.  */
+/* Create a vector of Qi, where i is the element number.  */
 static __inline __m128i
-_mm_load_si128 (__m128i const *__P)
+_mm_set_epi64x (long long __q1, long long __q0)
 {
-  return (__m128i) __builtin_ia32_loaddqa ((char const *)__P);
+  return (__m128i)(__v2di){ __q0, __q1 };
 }
 
 static __inline __m128i
-_mm_loadu_si128 (__m128i const *__P)
+_mm_set_epi64 (__m64 __q1, __m64 __q0)
 {
-  return (__m128i) __builtin_ia32_loaddqu ((char const *)__P);
+  return _mm_set_epi64x ((long long)__q1, (long long)__q0);
 }
 
 static __inline __m128i
-_mm_loadl_epi64 (__m128i const *__P)
+_mm_set_epi32 (int __q3, int __q2, int __q1, int __q0)
 {
-  return (__m128i) __builtin_ia32_movq2dq (*(unsigned long long *)__P);
+  return (__m128i)(__v4si){ __q0, __q1, __q2, __q3 };
 }
 
-static __inline void
-_mm_store_si128 (__m128i *__P, __m128i __B)
+static __inline __m128i
+_mm_set_epi16 (short __q7, short __q6, short __q5, short __q4,
	       short __q3, short __q2, short __q1, short __q0)
 {
-  __builtin_ia32_storedqa ((char *)__P, (__v16qi)__B);
+  return (__m128i)(__v8hi){ __q0, __q1, __q2, __q3, __q4, __q5, __q6, __q7 };
 }
 
-static __inline void
-_mm_storeu_si128 (__m128i *__P, __m128i __B)
+static __inline __m128i
+_mm_set_epi8 (char __q15, char __q14, char __q13, char __q12,
	      char __q11, char __q10, char __q09, char __q08,
	      char __q07, char __q06, char __q05, char __q04,
	      char __q03, char __q02, char __q01, char __q00)
 {
-  __builtin_ia32_storedqu ((char *)__P, (__v16qi)__B);
+  return (__m128i)(__v16qi){
    __q00, __q01, __q02, __q03, __q04, __q05, __q06, __q07,
    __q08, __q09, __q10, __q11, __q12, __q13, __q14, __q15
  };
 }
 
-static __inline void
-_mm_storel_epi64 (__m128i *__P, __m128i __B)
-{
-  *(long long *)__P = __builtin_ia32_movdq2q ((__v2di)__B);
-}
+/* Set all of the elements of the vector to A.  */
 
-static __inline __m64
-_mm_movepi64_pi64 (__m128i __B)
+static __inline __m128i
+_mm_set1_epi64x (long long __A)
 {
-  return (__m64) __builtin_ia32_movdq2q ((__v2di)__B);
+  return _mm_set_epi64x (__A, __A);
 }
 
 static __inline __m128i
-_mm_move_epi64 (__m128i __A)
+_mm_set1_epi64 (__m64 __A)
 {
-  return (__m128i) __builtin_ia32_movq ((__v2di)__A);
+  return _mm_set_epi64 (__A, __A);
 }
 
-/* Create a vector of zeros.  */
 static __inline __m128i
-_mm_setzero_si128 (void)
+_mm_set1_epi32 (int __A)
 {
-  return (__m128i) __builtin_ia32_setzero128 ();
+  return _mm_set_epi32 (__A, __A, __A, __A);
 }
 
 static __inline __m128i
-_mm_set_epi64 (__m64 __A, __m64 __B)
+_mm_set1_epi16 (short __A)
 {
-  __v2di __tmp = (__v2di)__builtin_ia32_movq2dq ((unsigned long long)__A);
-  __v2di __tmp2 = (__v2di)__builtin_ia32_movq2dq ((unsigned long long)__B);
-  return (__m128i)__builtin_ia32_punpcklqdq128 (__tmp2, __tmp);
+  return _mm_set_epi16 (__A, __A, __A, __A, __A, __A, __A, __A);
 }
 
-/* Create the vector [Z Y X W].  */
 static __inline __m128i
-_mm_set_epi32 (int __Z, int __Y, int __X, int __W)
+_mm_set1_epi8 (char __A)
 {
-  union {
-    int __a[4];
-    __m128i __v;
-  } __u;
+  return _mm_set_epi8 (__A, __A, __A, __A, __A, __A, __A, __A,
		       __A, __A, __A, __A, __A, __A, __A, __A);
+}
 
-  __u.__a[0] = __W;
-  __u.__a[1] = __X;
-  __u.__a[2] = __Y;
-  __u.__a[3] = __Z;
+/* Create a vector of Qi, where i is the element number.
   The parameter order is reversed from the _mm_set_epi* functions.  */
 
-  return __u.__v;
+static __inline __m128i
+_mm_setr_epi64 (__m64 __q0, __m64 __q1)
+{
+  return _mm_set_epi64 (__q1, __q0);
 }
 
-#ifdef __x86_64__
-/* Create the vector [Z Y].  */
 static __inline __m128i
-_mm_set_epi64x (long long __Z, long long __Y)
+_mm_setr_epi32 (int __q0, int __q1, int __q2, int __q3)
 {
-  union {
-    long __a[2];
-    __m128i __v;
-  } __u;
-
-  __u.__a[0] = __Y;
-  __u.__a[1] = __Z;
-
-  return __u.__v;
+  return _mm_set_epi32 (__q3, __q2, __q1, __q0);
 }
-#endif
 
-/* Create the vector [S T U V Z Y X W].  */
 static __inline __m128i
-_mm_set_epi16 (short __Z, short __Y, short __X, short __W,
	       short __V, short __U, short __T, short __S)
+_mm_setr_epi16 (short __q0, short __q1, short __q2, short __q3,
		short __q4, short __q5, short __q6, short __q7)
 {
-  union {
-    short __a[8];
-    __m128i __v;
-  } __u;
-
-  __u.__a[0] = __S;
-  __u.__a[1] = __T;
-  __u.__a[2] = __U;
-  __u.__a[3] = __V;
-  __u.__a[4] = __W;
-  __u.__a[5] = __X;
-  __u.__a[6] = __Y;
-  __u.__a[7] = __Z;
-
-  return __u.__v;
+  return _mm_set_epi16 (__q7, __q6, __q5, __q4, __q3, __q2, __q1, __q0);
 }
 
-/* Create the vector [S T U V Z Y X W].  */
 static __inline __m128i
-_mm_set_epi8 (char __Z, char __Y, char __X, char __W,
	      char __V, char __U, char __T, char __S,
	      char __Z1, char __Y1, char __X1, char __W1,
	      char __V1, char __U1, char __T1, char __S1)
+_mm_setr_epi8 (char __q00, char __q01, char __q02, char __q03,
	       char __q04, char __q05, char __q06, char __q07,
	       char __q08, char __q09, char __q10, char __q11,
	       char __q12, char __q13, char __q14, char __q15)
 {
-  union {
-    char __a[16];
-    __m128i __v;
-  } __u;
+  return _mm_set_epi8 (__q15, __q14, __q13, __q12, __q11, __q10, __q09, __q08,
		       __q07, __q06, __q05, __q04, __q03, __q02, __q01, __q00);
+}
 
-  __u.__a[0] = __S1;
-  __u.__a[1] = __T1;
-  __u.__a[2] = __U1;
-  __u.__a[3] = __V1;
-  __u.__a[4] = __W1;
-  __u.__a[5] = __X1;
-  __u.__a[6] = __Y1;
-  __u.__a[7] = __Z1;
-  __u.__a[8] = __S;
-  __u.__a[9] = __T;
-  __u.__a[10] = __U;
-  __u.__a[11] = __V;
-  __u.__a[12] = __W;
-  __u.__a[13] = __X;
-  __u.__a[14] = __Y;
-  __u.__a[15] = __Z;
+/* Create a vector with element 0 as *P and the rest zero.  */
 
-  return __u.__v;
+static __inline __m128i
+_mm_load_si128 (__m128i const *__P)
+{
+  return *__P;
 }
 
 static __inline __m128i
-_mm_set1_epi64 (__m64 __A)
+_mm_loadu_si128 (__m128i const *__P)
 {
-  __v2di __tmp = (__v2di)__builtin_ia32_movq2dq ((unsigned long long)__A);
-  return (__m128i)__builtin_ia32_punpcklqdq128 (__tmp, __tmp);
+  return (__m128i) __builtin_ia32_loaddqu ((char const *)__P);
 }
 
 static __inline __m128i
-_mm_set1_epi32 (int __A)
+_mm_loadl_epi64 (__m128i const *__P)
 {
-  __v4si __tmp = (__v4si)__builtin_ia32_loadd (&__A);
-  return (__m128i) __builtin_ia32_pshufd ((__v4si)__tmp, _MM_SHUFFLE (0,0,0,0));
+  return _mm_set_epi64 ((__m64)0LL, *(__m64 *)__P);
 }
 
-#ifdef __x86_64__
-static __inline __m128i
-_mm_set1_epi64x (long long __A)
+static __inline void
+_mm_store_si128 (__m128i *__P, __m128i __B)
 {
-  __v2di __tmp = (__v2di)__builtin_ia32_movq2dq ((unsigned long long)__A);
-  return (__m128i) __builtin_ia32_shufpd ((__v2df)__tmp, (__v2df)__tmp, _MM_SHUFFLE2 (0,0));
+  *__P = __B;
 }
-#endif
 
-static __inline __m128i
-_mm_set1_epi16 (short __A)
+static __inline void
+_mm_storeu_si128 (__m128i *__P, __m128i __B)
 {
-  int __Acopy = (unsigned short)__A;
-  __v4si __tmp = (__v4si)__builtin_ia32_loadd (&__Acopy);
-  __tmp = (__v4si)__builtin_ia32_punpcklwd128 ((__v8hi)__tmp, (__v8hi)__tmp);
-  return (__m128i) __builtin_ia32_pshufd ((__v4si)__tmp, _MM_SHUFFLE (0,0,0,0));
+  __builtin_ia32_storedqu ((char *)__P, (__v16qi)__B);
 }
 
-static __inline __m128i
-_mm_set1_epi8 (char __A)
+static __inline void
+_mm_storel_epi64 (__m128i *__P, __m128i __B)
 {
-  int __Acopy = (unsigned char)__A;
-  __v4si __tmp = (__v4si)__builtin_ia32_loadd (&__Acopy);
-  __tmp = (__v4si)__builtin_ia32_punpcklbw128 ((__v16qi)__tmp, (__v16qi)__tmp);
-  __tmp = (__v4si)__builtin_ia32_punpcklbw128 ((__v16qi)__tmp, (__v16qi)__tmp);
-  return (__m128i) __builtin_ia32_pshufd ((__v4si)__tmp, _MM_SHUFFLE (0,0,0,0));
+  *(long long *)__P = __builtin_ia32_vec_ext_v2di ((__v2di)__B, 0);
 }
 
-static __inline __m128i
-_mm_setr_epi64 (__m64 __A, __m64 __B)
+static __inline __m64
+_mm_movepi64_pi64 (__m128i __B)
 {
-  __v2di __tmp = (__v2di)__builtin_ia32_movq2dq ((unsigned long long)__A);
-  __v2di __tmp2 = (__v2di)__builtin_ia32_movq2dq ((unsigned long long)__B);
-  return (__m128i)__builtin_ia32_punpcklqdq128 (__tmp, __tmp2);
+  return (__m64) __builtin_ia32_vec_ext_v2di ((__v2di)__B, 0);
 }
 
-/* Create the vector [Z Y X W].  */
 static __inline __m128i
-_mm_setr_epi32 (int __W, int __X, int __Y, int __Z)
+_mm_movpi64_epi64 (__m64 __A)
 {
-  union {
-    int __a[4];
-    __m128i __v;
-  } __u;
-
-  __u.__a[0] = __W;
-  __u.__a[1] = __X;
-  __u.__a[2] = __Y;
-  __u.__a[3] = __Z;
-
-  return __u.__v;
+  return _mm_set_epi64 ((__m64)0LL, __A);
 }
 
-/* Create the vector [S T U V Z Y X W].  */
+
 static __inline __m128i
-_mm_setr_epi16 (short __S, short __T, short __U, short __V,
		short __W, short __X, short __Y, short __Z)
+_mm_move_epi64 (__m128i __A)
 {
-  union {
-    short __a[8];
-    __m128i __v;
-  } __u;
-
-  __u.__a[0] = __S;
-  __u.__a[1] = __T;
-  __u.__a[2] = __U;
-  __u.__a[3] = __V;
-  __u.__a[4] = __W;
-  __u.__a[5] = __X;
-  __u.__a[6] = __Y;
-  __u.__a[7] = __Z;
-
-  return __u.__v;
+  return _mm_set_epi64 ((__m64)0LL, _mm_movepi64_pi64 (__A));
 }
 
-/* Create the vector [S T U V Z Y X W].  */
+/* Create a vector of zeros.  */
 static __inline __m128i
-_mm_setr_epi8 (char __S1, char __T1, char __U1, char __V1,
	       char __W1, char __X1, char __Y1, char __Z1,
	       char __S, char __T, char __U, char __V,
	       char __W, char __X, char __Y, char __Z)
+_mm_setzero_si128 (void)
 {
-  union {
-    char __a[16];
-    __m128i __v;
-  } __u;
-
-  __u.__a[0] = __S1;
-  __u.__a[1] = __T1;
-  __u.__a[2] = __U1;
-  __u.__a[3] = __V1;
-  __u.__a[4] = __W1;
-  __u.__a[5] = __X1;
-  __u.__a[6] = __Y1;
-  __u.__a[7] = __Z1;
-  __u.__a[8] = __S;
-  __u.__a[9] = __T;
-  __u.__a[10] = __U;
-  __u.__a[11] = __V;
-  __u.__a[12] = __W;
-  __u.__a[13] = __X;
-  __u.__a[14] = __Y;
-  __u.__a[15] = __Z;
-
-  return __u.__v;
+  return (__m128i)(__v4si){ 0, 0, 0, 0 };
 }
 
 static __inline __m128d
@@ -956,24 +858,12 @@ _mm_loadh_pd (__m128d __A, double const *__B)
   return (__m128d)__builtin_ia32_loadhpd ((__v2df)__A, __B);
 }
 
-static __inline void
-_mm_storeh_pd (double *__A, __m128d __B)
-{
-  __builtin_ia32_storehpd (__A, (__v2df)__B);
-}
-
 static __inline __m128d
 _mm_loadl_pd (__m128d __A, double const *__B)
 {
   return (__m128d)__builtin_ia32_loadlpd ((__v2df)__A, __B);
 }
 
-static __inline void
-_mm_storel_pd (double *__A, __m128d __B)
-{
-  __builtin_ia32_storelpd (__A, (__v2df)__B);
-}
-
 static __inline int
 _mm_movemask_pd (__m128d __A)
 {
@@ -1365,9 +1255,24 @@ _mm_cmpgt_epi32 (__m128i __A, __m128i __B)
   return (__m128i)__builtin_ia32_pcmpgtd128 ((__v4si)__A, (__v4si)__B);
 }
 
-#define _mm_extract_epi16(__A, __B) __builtin_ia32_pextrw128 ((__v8hi)__A, __B)
+#if 0
+static __inline int __attribute__((__always_inline__))
+_mm_extract_epi16 (__m128i const __A, int const __N)
+{
+  return __builtin_ia32_vec_ext_v8hi ((__v8hi)__A, __N);
+}
 
-#define _mm_insert_epi16(__A, __B, __C) ((__m128i)__builtin_ia32_pinsrw128 ((__v8hi)__A, __B, __C))
+static __inline __m128i __attribute__((__always_inline__))
+_mm_insert_epi16 (__m128i const __A, int const __D, int const __N)
+{
+  return (__m128i) __builtin_ia32_vec_set_v8hi ((__v8hi)__A, __D, __N);
+}
+#else
+#define _mm_extract_epi16(A, N) \
+  ((int) __builtin_ia32_vec_ext_v8hi ((__v8hi)(A), (N)))
+#define _mm_insert_epi16(A, D, N) \
+  ((__m128i) __builtin_ia32_vec_set_v8hi ((__v8hi)(A), (D), (N)))
+#endif
 
 static __inline __m128i
 _mm_max_epi16 (__m128i __A, __m128i __B)
@@ -1451,12 +1356,6 @@ _mm_stream_pd (double *__A, __m128d __B)
   __builtin_ia32_movntpd (__A, (__v2df)__B);
 }
 
-static __inline __m128i
-_mm_movpi64_epi64 (__m64 __A)
-{
-  return (__m128i)__builtin_ia32_movq2dq ((unsigned long long)__A);
-}
-
 static __inline void
 _mm_clflush (void const *__A)
 {
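With the emmintrin.h definitions above, 16-bit element access now expands
through the generic vec_ext/vec_set builtins rather than pextrw/pinsrw
specific ones, and the selector is checked by the compiler.  A minimal
usage sketch (the function name here is hypothetical):

    #include <emmintrin.h>

    int
    third_halfword (__m128i __x)
    {
      /* Macro form expands to (int) __builtin_ia32_vec_ext_v8hi (__x, 2).  */
      return _mm_extract_epi16 (__x, 2);
    }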
diff --git a/gcc/config/i386/i386-protos.h b/gcc/config/i386/i386-protos.h
index 5920c9f..33bf6fc 100644
--- a/gcc/config/i386/i386-protos.h
+++ b/gcc/config/i386/i386-protos.h
@@ -198,8 +198,6 @@ extern rtx function_arg (CUMULATIVE_ARGS *, enum machine_mode, tree, int);
 extern void function_arg_advance (CUMULATIVE_ARGS *, enum machine_mode,
				   tree, int);
 extern rtx ix86_function_value (tree);
-extern void ix86_init_builtins (void);
-extern rtx ix86_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
 #endif
 
 #endif
@@ -219,7 +217,10 @@ extern int x86_field_alignment (tree, int);
 
 extern rtx ix86_tls_get_addr (void);
 
-extern void ix86_expand_vector_init (rtx, rtx);
+extern void ix86_expand_vector_init (bool, rtx, rtx);
+extern void ix86_expand_vector_set (bool, rtx, rtx, int);
+extern void ix86_expand_vector_extract (bool, rtx, rtx, int);
+
 /* In winnt.c */
 extern int i386_pe_dllexport_name_p (const char *);
 extern int i386_pe_dllimport_name_p (const char *);

diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index 27b2339..d9dcca5 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -921,6 +921,8 @@ static tree ix86_md_asm_clobbers (tree clobbers);
 static bool ix86_must_pass_in_stack (enum machine_mode mode, tree type);
 static bool ix86_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
				     tree, bool);
+static void ix86_init_builtins (void);
+static rtx ix86_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
 
 /* This function is only used on Solaris.  */
 static void i386_solaris_elf_named_section (const char *, unsigned int, tree)
@@ -973,7 +975,6 @@ static void init_ext_80387_constants (void);
 
 #undef TARGET_INIT_BUILTINS
 #define TARGET_INIT_BUILTINS ix86_init_builtins
-
 #undef TARGET_EXPAND_BUILTIN
 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
 
@@ -7668,15 +7669,14 @@ ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
 
   if (TARGET_SSE2 && mode == V2DFmode)
     {
+      rtx zero;
+
      /* When SSE registers are split into halves, we can avoid
	  writing to the top half twice.  */
      if (TARGET_SSE_SPLIT_REGS)
	{
	  emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
-	  m = adjust_address (op1, DFmode, 0);
-	  emit_insn (gen_sse2_loadlpd (op0, op0, m));
-	  m = adjust_address (op1, DFmode, 8);
-	  emit_insn (gen_sse2_loadhpd (op0, op0, m));
+	  zero = op0;
	}
      else
	{
@@ -7688,11 +7688,13 @@ ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
	     followed by an unpacklpd, but this is unconfirmed.  And
	     given that the dependency depth of the unpacklpd would
	     still be one, I'm not sure why this would be better.  */
-	  m = adjust_address (op1, DFmode, 0);
-	  emit_insn (gen_sse2_loadsd (op0, m));
-	  m = adjust_address (op1, DFmode, 8);
-	  emit_insn (gen_sse2_loadhpd (op0, op0, m));
+	  zero = CONST0_RTX (V2DFmode);
	}
+
+      m = adjust_address (op1, DFmode, 0);
+      emit_insn (gen_sse2_loadlpd (op0, zero, m));
+      m = adjust_address (op1, DFmode, 8);
+      emit_insn (gen_sse2_loadhpd (op0, op0, m));
     }
   else
     {
@@ -7735,15 +7737,15 @@ ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
	  emit_insn (gen_sse2_storelpd (m, op1));
	  m = adjust_address (op0, DFmode, 8);
	  emit_insn (gen_sse2_storehpd (m, op1));
-	  return;
	}
      else
	{
+	  if (mode != V4SFmode)
+	    op1 = gen_lowpart (V4SFmode, op1);
	  m = adjust_address (op0, V2SFmode, 0);
	  emit_insn (gen_sse_storelps (m, op1));
	  m = adjust_address (op0, V2SFmode, 8);
	  emit_insn (gen_sse_storehps (m, op1));
-	  return;
	}
     }
   else
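The reworked hunk above unifies both unaligned-V2DF load paths: the low
half is always loaded with sse2_loadlpd against a zero (or clobbered)
source, then the high half with sse2_loadhpd, replacing the removed
sse2_loadsd pattern.  At the intrinsic level the emitted sequence
corresponds roughly to (sketch only):

    /* Rough intrinsic-level equivalent of the RTL above; P points to an
       unaligned pair of doubles.  */
    static __m128d
    load_unaligned_v2df (double const *__P)
    {
      __m128d __x = _mm_loadl_pd (_mm_setzero_pd (), __P);
      return _mm_loadh_pd (__x, __P + 1);
    }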
@@ -12178,6 +12180,441 @@ x86_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
 #endif
 }
 
+/* Codes for all the SSE/MMX builtins.  */
+enum ix86_builtins
+{
+  IX86_BUILTIN_ADDPS,
+  IX86_BUILTIN_ADDSS,
+  IX86_BUILTIN_DIVPS,
+  IX86_BUILTIN_DIVSS,
+  IX86_BUILTIN_MULPS,
+  IX86_BUILTIN_MULSS,
+  IX86_BUILTIN_SUBPS,
+  IX86_BUILTIN_SUBSS,
+
+  IX86_BUILTIN_CMPEQPS,
+  IX86_BUILTIN_CMPLTPS,
+  IX86_BUILTIN_CMPLEPS,
+  IX86_BUILTIN_CMPGTPS,
+  IX86_BUILTIN_CMPGEPS,
+  IX86_BUILTIN_CMPNEQPS,
+  IX86_BUILTIN_CMPNLTPS,
+  IX86_BUILTIN_CMPNLEPS,
+  IX86_BUILTIN_CMPNGTPS,
+  IX86_BUILTIN_CMPNGEPS,
+  IX86_BUILTIN_CMPORDPS,
+  IX86_BUILTIN_CMPUNORDPS,
+  IX86_BUILTIN_CMPNEPS,
+  IX86_BUILTIN_CMPEQSS,
+  IX86_BUILTIN_CMPLTSS,
+  IX86_BUILTIN_CMPLESS,
+  IX86_BUILTIN_CMPNEQSS,
+  IX86_BUILTIN_CMPNLTSS,
+  IX86_BUILTIN_CMPNLESS,
+  IX86_BUILTIN_CMPNGTSS,
+  IX86_BUILTIN_CMPNGESS,
+  IX86_BUILTIN_CMPORDSS,
+  IX86_BUILTIN_CMPUNORDSS,
+  IX86_BUILTIN_CMPNESS,
+
+  IX86_BUILTIN_COMIEQSS,
+  IX86_BUILTIN_COMILTSS,
+  IX86_BUILTIN_COMILESS,
+  IX86_BUILTIN_COMIGTSS,
+  IX86_BUILTIN_COMIGESS,
+  IX86_BUILTIN_COMINEQSS,
+  IX86_BUILTIN_UCOMIEQSS,
+  IX86_BUILTIN_UCOMILTSS,
+  IX86_BUILTIN_UCOMILESS,
+  IX86_BUILTIN_UCOMIGTSS,
+  IX86_BUILTIN_UCOMIGESS,
+  IX86_BUILTIN_UCOMINEQSS,
+
+  IX86_BUILTIN_CVTPI2PS,
+  IX86_BUILTIN_CVTPS2PI,
+  IX86_BUILTIN_CVTSI2SS,
+  IX86_BUILTIN_CVTSI642SS,
+  IX86_BUILTIN_CVTSS2SI,
+  IX86_BUILTIN_CVTSS2SI64,
+  IX86_BUILTIN_CVTTPS2PI,
+  IX86_BUILTIN_CVTTSS2SI,
+  IX86_BUILTIN_CVTTSS2SI64,
+
+  IX86_BUILTIN_MAXPS,
+  IX86_BUILTIN_MAXSS,
+  IX86_BUILTIN_MINPS,
+  IX86_BUILTIN_MINSS,
+
+  IX86_BUILTIN_LOADUPS,
+  IX86_BUILTIN_STOREUPS,
+  IX86_BUILTIN_MOVSS,
+
+  IX86_BUILTIN_MOVHLPS,
+  IX86_BUILTIN_MOVLHPS,
+  IX86_BUILTIN_LOADHPS,
+  IX86_BUILTIN_LOADLPS,
+  IX86_BUILTIN_STOREHPS,
+  IX86_BUILTIN_STORELPS,
+
+  IX86_BUILTIN_MASKMOVQ,
+  IX86_BUILTIN_MOVMSKPS,
+  IX86_BUILTIN_PMOVMSKB,
+
+  IX86_BUILTIN_MOVNTPS,
+  IX86_BUILTIN_MOVNTQ,
+
+  IX86_BUILTIN_LOADDQU,
+  IX86_BUILTIN_STOREDQU,
+  IX86_BUILTIN_MOVQ,
+  IX86_BUILTIN_LOADD,
+  IX86_BUILTIN_STORED,
+
+  IX86_BUILTIN_PACKSSWB,
+  IX86_BUILTIN_PACKSSDW,
+  IX86_BUILTIN_PACKUSWB,
+
+  IX86_BUILTIN_PADDB,
+  IX86_BUILTIN_PADDW,
+  IX86_BUILTIN_PADDD,
+  IX86_BUILTIN_PADDQ,
+  IX86_BUILTIN_PADDSB,
+  IX86_BUILTIN_PADDSW,
+  IX86_BUILTIN_PADDUSB,
+  IX86_BUILTIN_PADDUSW,
+  IX86_BUILTIN_PSUBB,
+  IX86_BUILTIN_PSUBW,
+  IX86_BUILTIN_PSUBD,
+  IX86_BUILTIN_PSUBQ,
+  IX86_BUILTIN_PSUBSB,
+  IX86_BUILTIN_PSUBSW,
+  IX86_BUILTIN_PSUBUSB,
+  IX86_BUILTIN_PSUBUSW,
+
+  IX86_BUILTIN_PAND,
+  IX86_BUILTIN_PANDN,
+  IX86_BUILTIN_POR,
+  IX86_BUILTIN_PXOR,
+
+  IX86_BUILTIN_PAVGB,
+  IX86_BUILTIN_PAVGW,
+
+  IX86_BUILTIN_PCMPEQB,
+  IX86_BUILTIN_PCMPEQW,
+  IX86_BUILTIN_PCMPEQD,
+  IX86_BUILTIN_PCMPGTB,
+  IX86_BUILTIN_PCMPGTW,
+  IX86_BUILTIN_PCMPGTD,
+
+  IX86_BUILTIN_PMADDWD,
+
+  IX86_BUILTIN_PMAXSW,
+  IX86_BUILTIN_PMAXUB,
+  IX86_BUILTIN_PMINSW,
+  IX86_BUILTIN_PMINUB,
+
+  IX86_BUILTIN_PMULHUW,
+  IX86_BUILTIN_PMULHW,
+  IX86_BUILTIN_PMULLW,
+
+  IX86_BUILTIN_PSADBW,
+  IX86_BUILTIN_PSHUFW,
+
+  IX86_BUILTIN_PSLLW,
+  IX86_BUILTIN_PSLLD,
+  IX86_BUILTIN_PSLLQ,
+  IX86_BUILTIN_PSRAW,
+  IX86_BUILTIN_PSRAD,
+  IX86_BUILTIN_PSRLW,
+  IX86_BUILTIN_PSRLD,
+  IX86_BUILTIN_PSRLQ,
+  IX86_BUILTIN_PSLLWI,
+  IX86_BUILTIN_PSLLDI,
+  IX86_BUILTIN_PSLLQI,
+  IX86_BUILTIN_PSRAWI,
+  IX86_BUILTIN_PSRADI,
+  IX86_BUILTIN_PSRLWI,
+  IX86_BUILTIN_PSRLDI,
+  IX86_BUILTIN_PSRLQI,
+
+  IX86_BUILTIN_PUNPCKHBW,
+  IX86_BUILTIN_PUNPCKHWD,
+  IX86_BUILTIN_PUNPCKHDQ,
+  IX86_BUILTIN_PUNPCKLBW,
+  IX86_BUILTIN_PUNPCKLWD,
+  IX86_BUILTIN_PUNPCKLDQ,
+
+  IX86_BUILTIN_SHUFPS,
+
+  IX86_BUILTIN_RCPPS,
+  IX86_BUILTIN_RCPSS,
+  IX86_BUILTIN_RSQRTPS,
+  IX86_BUILTIN_RSQRTSS,
+  IX86_BUILTIN_SQRTPS,
+  IX86_BUILTIN_SQRTSS,
+
+  IX86_BUILTIN_UNPCKHPS,
+  IX86_BUILTIN_UNPCKLPS,
+
+  IX86_BUILTIN_ANDPS,
+  IX86_BUILTIN_ANDNPS,
+  IX86_BUILTIN_ORPS,
+  IX86_BUILTIN_XORPS,
+
+  IX86_BUILTIN_EMMS,
+  IX86_BUILTIN_LDMXCSR,
+  IX86_BUILTIN_STMXCSR,
+  IX86_BUILTIN_SFENCE,
+
+  /* 3DNow! Original */
+  IX86_BUILTIN_FEMMS,
+  IX86_BUILTIN_PAVGUSB,
+  IX86_BUILTIN_PF2ID,
+  IX86_BUILTIN_PFACC,
+  IX86_BUILTIN_PFADD,
+  IX86_BUILTIN_PFCMPEQ,
+  IX86_BUILTIN_PFCMPGE,
+  IX86_BUILTIN_PFCMPGT,
+  IX86_BUILTIN_PFMAX,
+  IX86_BUILTIN_PFMIN,
+  IX86_BUILTIN_PFMUL,
+  IX86_BUILTIN_PFRCP,
+  IX86_BUILTIN_PFRCPIT1,
+  IX86_BUILTIN_PFRCPIT2,
+  IX86_BUILTIN_PFRSQIT1,
+  IX86_BUILTIN_PFRSQRT,
+  IX86_BUILTIN_PFSUB,
+  IX86_BUILTIN_PFSUBR,
+  IX86_BUILTIN_PI2FD,
+  IX86_BUILTIN_PMULHRW,
+
+  /* 3DNow! Athlon Extensions */
+  IX86_BUILTIN_PF2IW,
+  IX86_BUILTIN_PFNACC,
+  IX86_BUILTIN_PFPNACC,
+  IX86_BUILTIN_PI2FW,
+  IX86_BUILTIN_PSWAPDSI,
+  IX86_BUILTIN_PSWAPDSF,
+
+  /* SSE2 */
+  IX86_BUILTIN_ADDPD,
+  IX86_BUILTIN_ADDSD,
+  IX86_BUILTIN_DIVPD,
+  IX86_BUILTIN_DIVSD,
+  IX86_BUILTIN_MULPD,
+  IX86_BUILTIN_MULSD,
+  IX86_BUILTIN_SUBPD,
+  IX86_BUILTIN_SUBSD,
+
+  IX86_BUILTIN_CMPEQPD,
+  IX86_BUILTIN_CMPLTPD,
+  IX86_BUILTIN_CMPLEPD,
+  IX86_BUILTIN_CMPGTPD,
+  IX86_BUILTIN_CMPGEPD,
+  IX86_BUILTIN_CMPNEQPD,
+  IX86_BUILTIN_CMPNLTPD,
+  IX86_BUILTIN_CMPNLEPD,
+  IX86_BUILTIN_CMPNGTPD,
+  IX86_BUILTIN_CMPNGEPD,
+  IX86_BUILTIN_CMPORDPD,
+  IX86_BUILTIN_CMPUNORDPD,
+  IX86_BUILTIN_CMPNEPD,
+  IX86_BUILTIN_CMPEQSD,
+  IX86_BUILTIN_CMPLTSD,
+  IX86_BUILTIN_CMPLESD,
+  IX86_BUILTIN_CMPNEQSD,
+  IX86_BUILTIN_CMPNLTSD,
+  IX86_BUILTIN_CMPNLESD,
+  IX86_BUILTIN_CMPORDSD,
+  IX86_BUILTIN_CMPUNORDSD,
+  IX86_BUILTIN_CMPNESD,
+
+  IX86_BUILTIN_COMIEQSD,
+  IX86_BUILTIN_COMILTSD,
+  IX86_BUILTIN_COMILESD,
+  IX86_BUILTIN_COMIGTSD,
+  IX86_BUILTIN_COMIGESD,
+  IX86_BUILTIN_COMINEQSD,
+  IX86_BUILTIN_UCOMIEQSD,
+  IX86_BUILTIN_UCOMILTSD,
+  IX86_BUILTIN_UCOMILESD,
+  IX86_BUILTIN_UCOMIGTSD,
+  IX86_BUILTIN_UCOMIGESD,
+  IX86_BUILTIN_UCOMINEQSD,
+
+  IX86_BUILTIN_MAXPD,
+  IX86_BUILTIN_MAXSD,
+  IX86_BUILTIN_MINPD,
+  IX86_BUILTIN_MINSD,
+
+  IX86_BUILTIN_ANDPD,
+  IX86_BUILTIN_ANDNPD,
+  IX86_BUILTIN_ORPD,
+  IX86_BUILTIN_XORPD,
+
+  IX86_BUILTIN_SQRTPD,
+  IX86_BUILTIN_SQRTSD,
+
+  IX86_BUILTIN_UNPCKHPD,
+  IX86_BUILTIN_UNPCKLPD,
+
+  IX86_BUILTIN_SHUFPD,
+
+  IX86_BUILTIN_LOADUPD,
+  IX86_BUILTIN_STOREUPD,
+  IX86_BUILTIN_MOVSD,
+
+  IX86_BUILTIN_LOADHPD,
+  IX86_BUILTIN_LOADLPD,
+
+  IX86_BUILTIN_CVTDQ2PD,
+  IX86_BUILTIN_CVTDQ2PS,
+
+  IX86_BUILTIN_CVTPD2DQ,
+  IX86_BUILTIN_CVTPD2PI,
+  IX86_BUILTIN_CVTPD2PS,
+  IX86_BUILTIN_CVTTPD2DQ,
+  IX86_BUILTIN_CVTTPD2PI,
+
+  IX86_BUILTIN_CVTPI2PD,
+  IX86_BUILTIN_CVTSI2SD,
+  IX86_BUILTIN_CVTSI642SD,
+
+  IX86_BUILTIN_CVTSD2SI,
+  IX86_BUILTIN_CVTSD2SI64,
+  IX86_BUILTIN_CVTSD2SS,
+  IX86_BUILTIN_CVTSS2SD,
+  IX86_BUILTIN_CVTTSD2SI,
+  IX86_BUILTIN_CVTTSD2SI64,
+
+  IX86_BUILTIN_CVTPS2DQ,
+  IX86_BUILTIN_CVTPS2PD,
+  IX86_BUILTIN_CVTTPS2DQ,
+
+  IX86_BUILTIN_MOVNTI,
+  IX86_BUILTIN_MOVNTPD,
+  IX86_BUILTIN_MOVNTDQ,
+
+  /* SSE2 MMX */
+  IX86_BUILTIN_MASKMOVDQU,
+  IX86_BUILTIN_MOVMSKPD,
+  IX86_BUILTIN_PMOVMSKB128,
+  IX86_BUILTIN_MOVQ2DQ,
+  IX86_BUILTIN_MOVDQ2Q,
+
+  IX86_BUILTIN_PACKSSWB128,
+  IX86_BUILTIN_PACKSSDW128,
+  IX86_BUILTIN_PACKUSWB128,
+
+  IX86_BUILTIN_PADDB128,
+  IX86_BUILTIN_PADDW128,
+  IX86_BUILTIN_PADDD128,
+  IX86_BUILTIN_PADDQ128,
+  IX86_BUILTIN_PADDSB128,
+  IX86_BUILTIN_PADDSW128,
+  IX86_BUILTIN_PADDUSB128,
+  IX86_BUILTIN_PADDUSW128,
+  IX86_BUILTIN_PSUBB128,
+  IX86_BUILTIN_PSUBW128,
+  IX86_BUILTIN_PSUBD128,
+  IX86_BUILTIN_PSUBQ128,
+  IX86_BUILTIN_PSUBSB128,
+  IX86_BUILTIN_PSUBSW128,
+  IX86_BUILTIN_PSUBUSB128,
+  IX86_BUILTIN_PSUBUSW128,
+
+  IX86_BUILTIN_PAND128,
+  IX86_BUILTIN_PANDN128,
+  IX86_BUILTIN_POR128,
+  IX86_BUILTIN_PXOR128,
+
+  IX86_BUILTIN_PAVGB128,
+  IX86_BUILTIN_PAVGW128,
+
+  IX86_BUILTIN_PCMPEQB128,
+  IX86_BUILTIN_PCMPEQW128,
+  IX86_BUILTIN_PCMPEQD128,
+  IX86_BUILTIN_PCMPGTB128,
+  IX86_BUILTIN_PCMPGTW128,
+  IX86_BUILTIN_PCMPGTD128,
+
+  IX86_BUILTIN_PMADDWD128,
+
+  IX86_BUILTIN_PMAXSW128,
+  IX86_BUILTIN_PMAXUB128,
+  IX86_BUILTIN_PMINSW128,
+  IX86_BUILTIN_PMINUB128,
+
+  IX86_BUILTIN_PMULUDQ,
+  IX86_BUILTIN_PMULUDQ128,
+  IX86_BUILTIN_PMULHUW128,
+  IX86_BUILTIN_PMULHW128,
+  IX86_BUILTIN_PMULLW128,
+
+  IX86_BUILTIN_PSADBW128,
+  IX86_BUILTIN_PSHUFHW,
+  IX86_BUILTIN_PSHUFLW,
+  IX86_BUILTIN_PSHUFD,
+
+  IX86_BUILTIN_PSLLW128,
+  IX86_BUILTIN_PSLLD128,
+  IX86_BUILTIN_PSLLQ128,
+  IX86_BUILTIN_PSRAW128,
+  IX86_BUILTIN_PSRAD128,
+  IX86_BUILTIN_PSRLW128,
+  IX86_BUILTIN_PSRLD128,
+  IX86_BUILTIN_PSRLQ128,
+  IX86_BUILTIN_PSLLDQI128,
+  IX86_BUILTIN_PSLLWI128,
+  IX86_BUILTIN_PSLLDI128,
+  IX86_BUILTIN_PSLLQI128,
+  IX86_BUILTIN_PSRAWI128,
+  IX86_BUILTIN_PSRADI128,
+  IX86_BUILTIN_PSRLDQI128,
+  IX86_BUILTIN_PSRLWI128,
+  IX86_BUILTIN_PSRLDI128,
+  IX86_BUILTIN_PSRLQI128,
+
+  IX86_BUILTIN_PUNPCKHBW128,
+  IX86_BUILTIN_PUNPCKHWD128,
+  IX86_BUILTIN_PUNPCKHDQ128,
+  IX86_BUILTIN_PUNPCKHQDQ128,
+  IX86_BUILTIN_PUNPCKLBW128,
+  IX86_BUILTIN_PUNPCKLWD128,
+  IX86_BUILTIN_PUNPCKLDQ128,
+  IX86_BUILTIN_PUNPCKLQDQ128,
+
+  IX86_BUILTIN_CLFLUSH,
+  IX86_BUILTIN_MFENCE,
+  IX86_BUILTIN_LFENCE,
+
+  /* Prescott New Instructions.  */
+  IX86_BUILTIN_ADDSUBPS,
+  IX86_BUILTIN_HADDPS,
+  IX86_BUILTIN_HSUBPS,
+  IX86_BUILTIN_MOVSHDUP,
+  IX86_BUILTIN_MOVSLDUP,
+  IX86_BUILTIN_ADDSUBPD,
+  IX86_BUILTIN_HADDPD,
+  IX86_BUILTIN_HSUBPD,
+  IX86_BUILTIN_LDDQU,
+
+  IX86_BUILTIN_MONITOR,
+  IX86_BUILTIN_MWAIT,
+
+  IX86_BUILTIN_VEC_INIT_V2SI,
+  IX86_BUILTIN_VEC_INIT_V4HI,
+  IX86_BUILTIN_VEC_INIT_V8QI,
+  IX86_BUILTIN_VEC_EXT_V2DF,
+  IX86_BUILTIN_VEC_EXT_V2DI,
+  IX86_BUILTIN_VEC_EXT_V4SF,
+  IX86_BUILTIN_VEC_EXT_V8HI,
+  IX86_BUILTIN_VEC_EXT_V4HI,
+  IX86_BUILTIN_VEC_SET_V8HI,
+  IX86_BUILTIN_VEC_SET_V4HI,
+
+  IX86_BUILTIN_MAX
+};
+
 #define def_builtin(MASK, NAME, TYPE, CODE)		\
 do {							\
   if ((MASK) & target_flags				\
@@ -12549,10 +12986,9 @@ static const struct builtin_description bdesc_1arg[] =
 
   /* SSE3 */
   { MASK_SSE3, CODE_FOR_sse3_movshdup, 0, IX86_BUILTIN_MOVSHDUP, 0, 0 },
   { MASK_SSE3, CODE_FOR_sse3_movsldup, 0, IX86_BUILTIN_MOVSLDUP, 0, 0 },
-  { MASK_SSE3, CODE_FOR_sse3_movddup, 0, IX86_BUILTIN_MOVDDUP, 0, 0 }
 };
 
-void
+static void
 ix86_init_builtins (void)
 {
   if (TARGET_MMX)
@@ -12616,13 +13052,7 @@ ix86_init_mmx_sse_builtins (void)
   tree v4sf_ftype_v4sf_v2si
     = build_function_type_list (V4SF_type_node,
				 V4SF_type_node, V2SI_type_node, NULL_TREE);
-  tree int_ftype_v4hi_int
-    = build_function_type_list (integer_type_node,
-				V4HI_type_node, integer_type_node, NULL_TREE);
-  tree v4hi_ftype_v4hi_int_int
-    = build_function_type_list (V4HI_type_node, V4HI_type_node,
-				integer_type_node, integer_type_node,
-				NULL_TREE);
+
   /* Miscellaneous.  */
   tree v8qi_ftype_v4hi_v4hi
     = build_function_type_list (V8QI_type_node,
@@ -12661,10 +13091,6 @@ ix86_init_mmx_sse_builtins (void)
				 NULL_TREE);
   tree unsigned_ftype_void
     = build_function_type (unsigned_type_node, void_list_node);
-  tree di_ftype_void
-    = build_function_type (long_long_unsigned_type_node, void_list_node);
-  tree v4sf_ftype_void
-    = build_function_type (V4SF_type_node, void_list_node);
   tree v2si_ftype_v4sf
     = build_function_type_list (V2SI_type_node, V4SF_type_node, NULL_TREE);
   /* Loads/stores.  */
@@ -12737,10 +13163,6 @@ ix86_init_mmx_sse_builtins (void)
     = build_function_type_list (integer_type_node,
				 V2DF_type_node, V2DF_type_node,
				 NULL_TREE);
-  tree ti_ftype_void
-    = build_function_type (intTI_type_node, void_list_node);
-  tree v2di_ftype_void
-    = build_function_type (V2DI_type_node, void_list_node);
   tree ti_ftype_ti_ti
     = build_function_type_list (intTI_type_node,
				 intTI_type_node, intTI_type_node, NULL_TREE);
@@ -12826,18 +13248,6 @@ ix86_init_mmx_sse_builtins (void)
				 V2DF_type_node, V2DF_type_node, NULL_TREE);
   tree v2df_ftype_v2df
     = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
-  tree v2df_ftype_double
-    = build_function_type_list (V2DF_type_node, double_type_node, NULL_TREE);
-  tree v2df_ftype_double_double
-    = build_function_type_list (V2DF_type_node,
-				double_type_node, double_type_node, NULL_TREE);
-  tree int_ftype_v8hi_int
-    = build_function_type_list (integer_type_node,
-				V8HI_type_node, integer_type_node, NULL_TREE);
-  tree v8hi_ftype_v8hi_int_int
-    = build_function_type_list (V8HI_type_node,
-				V8HI_type_node, integer_type_node,
-				integer_type_node, NULL_TREE);
   tree v2di_ftype_v2di_int
     = build_function_type_list (V2DI_type_node,
				 V2DI_type_node, integer_type_node, NULL_TREE);
@@ -12885,6 +13295,7 @@ ix86_init_mmx_sse_builtins (void)
 
   tree float80_type;
   tree float128_type;
+  tree ftype;
 
   /* The __float80 type.  */
   if (TYPE_MODE (long_double_type_node) == XFmode)
@@ -12970,7 +13381,6 @@ ix86_init_mmx_sse_builtins (void)
     }
 
   /* Add the remaining MMX insns with somewhat more complicated types.  */
-  def_builtin (MASK_MMX, "__builtin_ia32_mmx_zero", di_ftype_void, IX86_BUILTIN_MMX_ZERO);
   def_builtin (MASK_MMX, "__builtin_ia32_emms", void_ftype_void, IX86_BUILTIN_EMMS);
   def_builtin (MASK_MMX, "__builtin_ia32_psllw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSLLW);
   def_builtin (MASK_MMX, "__builtin_ia32_pslld", v2si_ftype_v2si_di, IX86_BUILTIN_PSLLD);
@@ -13009,17 +13419,10 @@ ix86_init_mmx_sse_builtins (void)
   def_builtin (MASK_SSE, "__builtin_ia32_cvttss2si", int_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI);
   def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvttss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI64);
 
-  def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pextrw", int_ftype_v4hi_int, IX86_BUILTIN_PEXTRW);
-  def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pinsrw", v4hi_ftype_v4hi_int_int, IX86_BUILTIN_PINSRW);
-
   def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_maskmovq", void_ftype_v8qi_v8qi_pchar, IX86_BUILTIN_MASKMOVQ);
 
-  def_builtin (MASK_SSE, "__builtin_ia32_loadaps", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADAPS);
   def_builtin (MASK_SSE, "__builtin_ia32_loadups", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADUPS);
-  def_builtin (MASK_SSE, "__builtin_ia32_loadss", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADSS);
-  def_builtin (MASK_SSE, "__builtin_ia32_storeaps", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREAPS);
   def_builtin (MASK_SSE, "__builtin_ia32_storeups", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREUPS);
-  def_builtin (MASK_SSE, "__builtin_ia32_storess", void_ftype_pfloat_v4sf, IX86_BUILTIN_STORESS);
 
   def_builtin (MASK_SSE, "__builtin_ia32_loadhps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADHPS);
   def_builtin (MASK_SSE, "__builtin_ia32_loadlps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADLPS);
@@ -13074,27 +13477,16 @@ ix86_init_mmx_sse_builtins (void)
   def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsf", v2sf_ftype_v2sf, IX86_BUILTIN_PSWAPDSF);
   def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsi", v2si_ftype_v2si, IX86_BUILTIN_PSWAPDSI);
 
"__builtin_ia32_setzerops", v4sf_ftype_void, IX86_BUILTIN_SSE_ZERO); - /* SSE2 */ - def_builtin (MASK_SSE2, "__builtin_ia32_pextrw128", int_ftype_v8hi_int, IX86_BUILTIN_PEXTRW128); - def_builtin (MASK_SSE2, "__builtin_ia32_pinsrw128", v8hi_ftype_v8hi_int_int, IX86_BUILTIN_PINSRW128); - def_builtin (MASK_SSE2, "__builtin_ia32_maskmovdqu", void_ftype_v16qi_v16qi_pchar, IX86_BUILTIN_MASKMOVDQU); def_builtin (MASK_SSE2, "__builtin_ia32_movq2dq", v2di_ftype_di, IX86_BUILTIN_MOVQ2DQ); def_builtin (MASK_SSE2, "__builtin_ia32_movdq2q", di_ftype_v2di, IX86_BUILTIN_MOVDQ2Q); - def_builtin (MASK_SSE2, "__builtin_ia32_loadapd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADAPD); def_builtin (MASK_SSE2, "__builtin_ia32_loadupd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADUPD); - def_builtin (MASK_SSE2, "__builtin_ia32_loadsd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADSD); - def_builtin (MASK_SSE2, "__builtin_ia32_storeapd", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREAPD); def_builtin (MASK_SSE2, "__builtin_ia32_storeupd", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREUPD); - def_builtin (MASK_SSE2, "__builtin_ia32_storesd", void_ftype_pdouble_v2df, IX86_BUILTIN_STORESD); def_builtin (MASK_SSE2, "__builtin_ia32_loadhpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADHPD); def_builtin (MASK_SSE2, "__builtin_ia32_loadlpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADLPD); - def_builtin (MASK_SSE2, "__builtin_ia32_storehpd", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREHPD); - def_builtin (MASK_SSE2, "__builtin_ia32_storelpd", void_ftype_pdouble_v2df, IX86_BUILTIN_STORELPD); def_builtin (MASK_SSE2, "__builtin_ia32_movmskpd", int_ftype_v2df, IX86_BUILTIN_MOVMSKPD); def_builtin (MASK_SSE2, "__builtin_ia32_pmovmskb128", int_ftype_v16qi, IX86_BUILTIN_PMOVMSKB128); @@ -13137,28 +13529,16 @@ ix86_init_mmx_sse_builtins (void) def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2ss", v4sf_ftype_v4sf_v2df, IX86_BUILTIN_CVTSD2SS); def_builtin (MASK_SSE2, "__builtin_ia32_cvtss2sd", v2df_ftype_v2df_v4sf, IX86_BUILTIN_CVTSS2SD); - def_builtin (MASK_SSE2, "__builtin_ia32_setpd1", v2df_ftype_double, IX86_BUILTIN_SETPD1); - def_builtin (MASK_SSE2, "__builtin_ia32_setpd", v2df_ftype_double_double, IX86_BUILTIN_SETPD); - def_builtin (MASK_SSE2, "__builtin_ia32_setzeropd", ti_ftype_void, IX86_BUILTIN_CLRPD); - def_builtin (MASK_SSE2, "__builtin_ia32_loadpd1", v2df_ftype_pcdouble, IX86_BUILTIN_LOADPD1); - def_builtin (MASK_SSE2, "__builtin_ia32_loadrpd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADRPD); - def_builtin (MASK_SSE2, "__builtin_ia32_storepd1", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREPD1); - def_builtin (MASK_SSE2, "__builtin_ia32_storerpd", void_ftype_pdouble_v2df, IX86_BUILTIN_STORERPD); - def_builtin (MASK_SSE2, "__builtin_ia32_clflush", void_ftype_pcvoid, IX86_BUILTIN_CLFLUSH); def_builtin (MASK_SSE2, "__builtin_ia32_lfence", void_ftype_void, IX86_BUILTIN_LFENCE); def_builtin (MASK_SSE2, "__builtin_ia32_mfence", void_ftype_void, IX86_BUILTIN_MFENCE); - def_builtin (MASK_SSE2, "__builtin_ia32_loaddqa", v16qi_ftype_pcchar, IX86_BUILTIN_LOADDQA); def_builtin (MASK_SSE2, "__builtin_ia32_loaddqu", v16qi_ftype_pcchar, IX86_BUILTIN_LOADDQU); def_builtin (MASK_SSE2, "__builtin_ia32_loadd", v4si_ftype_pcint, IX86_BUILTIN_LOADD); - def_builtin (MASK_SSE2, "__builtin_ia32_storedqa", void_ftype_pchar_v16qi, IX86_BUILTIN_STOREDQA); def_builtin (MASK_SSE2, "__builtin_ia32_storedqu", void_ftype_pchar_v16qi, IX86_BUILTIN_STOREDQU); def_builtin (MASK_SSE2, "__builtin_ia32_stored", void_ftype_pcint_v4si, IX86_BUILTIN_STORED); def_builtin (MASK_SSE2, 
"__builtin_ia32_movq", v2di_ftype_v2di, IX86_BUILTIN_MOVQ); - def_builtin (MASK_SSE, "__builtin_ia32_setzero128", v2di_ftype_void, IX86_BUILTIN_CLRTI); - def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq", di_ftype_v2si_v2si, IX86_BUILTIN_PMULUDQ); def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq128", v2di_ftype_v4si_v4si, IX86_BUILTIN_PMULUDQ128); @@ -13203,10 +13583,67 @@ ix86_init_mmx_sse_builtins (void) IX86_BUILTIN_MOVSLDUP); def_builtin (MASK_SSE3, "__builtin_ia32_lddqu", v16qi_ftype_pcchar, IX86_BUILTIN_LDDQU); - def_builtin (MASK_SSE3, "__builtin_ia32_loadddup", - v2df_ftype_pcdouble, IX86_BUILTIN_LOADDDUP); - def_builtin (MASK_SSE3, "__builtin_ia32_movddup", - v2df_ftype_v2df, IX86_BUILTIN_MOVDDUP); + + /* Access to the vec_init patterns. */ + ftype = build_function_type_list (V2SI_type_node, integer_type_node, + integer_type_node, NULL_TREE); + def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v2si", + ftype, IX86_BUILTIN_VEC_INIT_V2SI); + + ftype = build_function_type_list (V4HI_type_node, short_integer_type_node, + short_integer_type_node, + short_integer_type_node, + short_integer_type_node, NULL_TREE); + def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v4hi", + ftype, IX86_BUILTIN_VEC_INIT_V4HI); + + ftype = build_function_type_list (V8QI_type_node, char_type_node, + char_type_node, char_type_node, + char_type_node, char_type_node, + char_type_node, char_type_node, + char_type_node, NULL_TREE); + def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v8qi", + ftype, IX86_BUILTIN_VEC_INIT_V8QI); + + /* Access to the vec_extract patterns. */ + ftype = build_function_type_list (double_type_node, V2DF_type_node, + integer_type_node, NULL_TREE); + def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2df", + ftype, IX86_BUILTIN_VEC_EXT_V2DF); + + ftype = build_function_type_list (long_long_integer_type_node, + V2DI_type_node, integer_type_node, + NULL_TREE); + def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2di", + ftype, IX86_BUILTIN_VEC_EXT_V2DI); + + ftype = build_function_type_list (float_type_node, V4SF_type_node, + integer_type_node, NULL_TREE); + def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4sf", + ftype, IX86_BUILTIN_VEC_EXT_V4SF); + + ftype = build_function_type_list (intHI_type_node, V8HI_type_node, + integer_type_node, NULL_TREE); + def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v8hi", + ftype, IX86_BUILTIN_VEC_EXT_V8HI); + + ftype = build_function_type_list (intHI_type_node, V4HI_type_node, + integer_type_node, NULL_TREE); + def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_ext_v4hi", + ftype, IX86_BUILTIN_VEC_EXT_V4HI); + + /* Access to the vec_set patterns. */ + ftype = build_function_type_list (V8HI_type_node, V8HI_type_node, + intHI_type_node, + integer_type_node, NULL_TREE); + def_builtin (MASK_SSE, "__builtin_ia32_vec_set_v8hi", + ftype, IX86_BUILTIN_VEC_SET_V8HI); + + ftype = build_function_type_list (V4HI_type_node, V4HI_type_node, + intHI_type_node, + integer_type_node, NULL_TREE); + def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_set_v4hi", + ftype, IX86_BUILTIN_VEC_SET_V4HI); } /* Errors in the source file can cause expand_expr to return const0_rtx @@ -13262,12 +13699,27 @@ ix86_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target) if (!(*insn_data[icode].operand[2].predicate) (op1, mode1)) op1 = copy_to_mode_reg (mode1, op1); + /* ??? Using ix86_fixup_binary_operands is problematic when + we've got mismatched modes. Fake it. 
   xops[0] = target;
   xops[1] = op0;
   xops[2] = op1;
-  target = ix86_fixup_binary_operands (UNKNOWN, tmode, xops);
-  pat = GEN_FCN (icode) (target, xops[1], xops[2]);
+
+  if (tmode == mode0 && tmode == mode1)
+    {
+      target = ix86_fixup_binary_operands (UNKNOWN, tmode, xops);
+      op0 = xops[1];
+      op1 = xops[2];
+    }
+  else if (optimize || !ix86_binary_operator_ok (UNKNOWN, tmode, xops))
+    {
+      op0 = force_reg (mode0, op0);
+      op1 = force_reg (mode1, op1);
+      target = gen_reg_rtx (tmode);
+    }
+
+  pat = GEN_FCN (icode) (target, op0, op1);
   if (! pat)
     return 0;
   emit_insn (pat);
@@ -13476,13 +13928,132 @@ ix86_expand_sse_comi (const struct builtin_description *d, tree arglist,
   return SUBREG_REG (target);
 }
 
+/* Return the integer constant in ARG.  Constrain it to be in the range
+   of the subparts of VEC_TYPE; issue an error if not.  */
+
+static int
+get_element_number (tree vec_type, tree arg)
+{
+  unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
+
+  if (!host_integerp (arg, 1)
+      || (elt = tree_low_cst (arg, 1), elt > max))
+    {
+      error ("selector must be an integer constant in the range 0..%i", max);
+      return 0;
+    }
+
+  return elt;
+}
+
+/* A subroutine of ix86_expand_builtin.  These builtins are a wrapper around
+   ix86_expand_vector_init.  We DO have language-level syntax for this, in
+   the form of (type){ init-list }.  Except that since we can't place emms
+   instructions from inside the compiler, we can't allow the use of MMX
+   registers unless the user explicitly asks for it.  So we do *not* define
+   vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md.  Instead
+   we have builtins invoked by mmintrin.h that gives us license to emit
+   these sorts of instructions.  */
+
+static rtx
+ix86_expand_vec_init_builtin (tree type, tree arglist, rtx target)
+{
+  enum machine_mode tmode = TYPE_MODE (type);
+  enum machine_mode inner_mode = GET_MODE_INNER (tmode);
+  int i, n_elt = GET_MODE_NUNITS (tmode);
+  rtvec v = rtvec_alloc (n_elt);
+
+  gcc_assert (VECTOR_MODE_P (tmode));
+
+  for (i = 0; i < n_elt; ++i, arglist = TREE_CHAIN (arglist))
+    {
+      rtx x = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0);
+      RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
+    }
+
+  gcc_assert (arglist == NULL);
+
+  if (!target || !register_operand (target, tmode))
+    target = gen_reg_rtx (tmode);
+
+  ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
+  return target;
+}
+
+/* A subroutine of ix86_expand_builtin.  These builtins are a wrapper around
+   ix86_expand_vector_extract.  They would be redundant (for non-MMX) if we
+   had a language-level syntax for referencing vector elements.  */
+
+static rtx
+ix86_expand_vec_ext_builtin (tree arglist, rtx target)
+{
+  enum machine_mode tmode, mode0;
+  tree arg0, arg1;
+  int elt;
+  rtx op0;
+
+  arg0 = TREE_VALUE (arglist);
+  arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+
+  op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+  elt = get_element_number (TREE_TYPE (arg0), arg1);
+
+  tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
+  mode0 = TYPE_MODE (TREE_TYPE (arg0));
+  gcc_assert (VECTOR_MODE_P (mode0));
+
+  op0 = force_reg (mode0, op0);
+
+  if (optimize || !target || !register_operand (target, tmode))
+    target = gen_reg_rtx (tmode);
+
+  ix86_expand_vector_extract (true, target, op0, elt);
+
+  return target;
+}
+
+/* A subroutine of ix86_expand_builtin.  These builtins are a wrapper around
+   ix86_expand_vector_set.  They would be redundant (for non-MMX) if we had
+   a language-level syntax for referencing vector elements.  */
*/ + +static rtx +ix86_expand_vec_set_builtin (tree arglist) +{ + enum machine_mode tmode, mode1; + tree arg0, arg1, arg2; + int elt; + rtx op0, op1; + + arg0 = TREE_VALUE (arglist); + arg1 = TREE_VALUE (TREE_CHAIN (arglist)); + arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); + + tmode = TYPE_MODE (TREE_TYPE (arg0)); + mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0))); + gcc_assert (VECTOR_MODE_P (tmode)); + + op0 = expand_expr (arg0, NULL_RTX, tmode, 0); + op1 = expand_expr (arg1, NULL_RTX, mode1, 0); + elt = get_element_number (TREE_TYPE (arg0), arg2); + + if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode) + op1 = convert_modes (mode1, GET_MODE (op1), op1, true); + + op0 = force_reg (tmode, op0); + op1 = force_reg (mode1, op1); + + ix86_expand_vector_set (true, op0, op1, elt); + + return op0; +} + /* Expand an expression EXP that calls a built-in function, with result going to TARGET if that's convenient (and in mode MODE if that's convenient). SUBTARGET may be used as the target for computing one of EXP's operands. IGNORE is nonzero if the value is to be ignored. */ -rtx +static rtx ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED, enum machine_mode mode ATTRIBUTE_UNUSED, int ignore ATTRIBUTE_UNUSED) @@ -13507,74 +14078,6 @@ ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED, emit_insn (gen_sse_sfence ()); return 0; - case IX86_BUILTIN_PEXTRW: - case IX86_BUILTIN_PEXTRW128: - icode = (fcode == IX86_BUILTIN_PEXTRW - ? CODE_FOR_mmx_pextrw - : CODE_FOR_sse2_pextrw); - arg0 = TREE_VALUE (arglist); - arg1 = TREE_VALUE (TREE_CHAIN (arglist)); - op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); - op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); - tmode = insn_data[icode].operand[0].mode; - mode0 = insn_data[icode].operand[1].mode; - mode1 = insn_data[icode].operand[2].mode; - - if (! (*insn_data[icode].operand[1].predicate) (op0, mode0)) - op0 = copy_to_mode_reg (mode0, op0); - if (! (*insn_data[icode].operand[2].predicate) (op1, mode1)) - { - error ("selector must be an integer constant in the range 0..%i", - fcode == IX86_BUILTIN_PEXTRW ? 3:7); - return gen_reg_rtx (tmode); - } - if (target == 0 - || GET_MODE (target) != tmode - || ! (*insn_data[icode].operand[0].predicate) (target, tmode)) - target = gen_reg_rtx (tmode); - pat = GEN_FCN (icode) (target, op0, op1); - if (! pat) - return 0; - emit_insn (pat); - return target; - - case IX86_BUILTIN_PINSRW: - case IX86_BUILTIN_PINSRW128: - icode = (fcode == IX86_BUILTIN_PINSRW - ? CODE_FOR_mmx_pinsrw - : CODE_FOR_sse2_pinsrw); - arg0 = TREE_VALUE (arglist); - arg1 = TREE_VALUE (TREE_CHAIN (arglist)); - arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); - op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); - op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); - op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0); - tmode = insn_data[icode].operand[0].mode; - mode0 = insn_data[icode].operand[1].mode; - mode1 = insn_data[icode].operand[2].mode; - mode2 = insn_data[icode].operand[3].mode; - - if (! (*insn_data[icode].operand[1].predicate) (op0, mode0)) - op0 = copy_to_mode_reg (mode0, op0); - if ((optimize && !register_operand (op1, mode1)) - || ! (*insn_data[icode].operand[2].predicate) (op1, mode1)) - op1 = copy_to_mode_reg (mode1, op1); - if (! (*insn_data[icode].operand[3].predicate) (op2, mode2)) - { - error ("selector must be an integer constant in the range 0..%i", - fcode == IX86_BUILTIN_PINSRW ? 3:7); - return const0_rtx; - } - if (target == 0 - || GET_MODE (target) != tmode - || ! 
(*insn_data[icode].operand[0].predicate) (target, tmode)) - target = gen_reg_rtx (tmode); - pat = GEN_FCN (icode) (target, op0, op1, op2); - if (! pat) - return 0; - emit_insn (pat); - return target; - case IX86_BUILTIN_MASKMOVQ: case IX86_BUILTIN_MASKMOVDQU: icode = (fcode == IX86_BUILTIN_MASKMOVQ @@ -13613,24 +14116,12 @@ ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED, case IX86_BUILTIN_RCPSS: return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrcpv4sf2, arglist, target); - case IX86_BUILTIN_LOADAPS: - return ix86_expand_unop_builtin (CODE_FOR_movv4sf, arglist, target, 1); - case IX86_BUILTIN_LOADUPS: return ix86_expand_unop_builtin (CODE_FOR_sse_movups, arglist, target, 1); - case IX86_BUILTIN_STOREAPS: - return ix86_expand_store_builtin (CODE_FOR_movv4sf, arglist); - case IX86_BUILTIN_STOREUPS: return ix86_expand_store_builtin (CODE_FOR_sse_movups, arglist); - case IX86_BUILTIN_LOADSS: - return ix86_expand_unop_builtin (CODE_FOR_sse_loadss, arglist, target, 1); - - case IX86_BUILTIN_STORESS: - return ix86_expand_store_builtin (CODE_FOR_sse_storess, arglist); - case IX86_BUILTIN_LOADHPS: case IX86_BUILTIN_LOADLPS: case IX86_BUILTIN_LOADHPD: @@ -13661,12 +14152,8 @@ ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED, case IX86_BUILTIN_STOREHPS: case IX86_BUILTIN_STORELPS: - case IX86_BUILTIN_STOREHPD: - case IX86_BUILTIN_STORELPD: icode = (fcode == IX86_BUILTIN_STOREHPS ? CODE_FOR_sse_storehps - : fcode == IX86_BUILTIN_STORELPS ? CODE_FOR_sse_storelps - : fcode == IX86_BUILTIN_STOREHPD ? CODE_FOR_sse2_storehpd - : CODE_FOR_sse2_storelpd); + : CODE_FOR_sse_storelps); arg0 = TREE_VALUE (arglist); arg1 = TREE_VALUE (TREE_CHAIN (arglist)); op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); @@ -13879,75 +14366,13 @@ ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED, case IX86_BUILTIN_PSWAPDSF: return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2sf2, arglist, target, 0); - case IX86_BUILTIN_SSE_ZERO: - return CONST0_RTX (V4SFmode); - - case IX86_BUILTIN_MMX_ZERO: - return const0_rtx; - - case IX86_BUILTIN_CLRTI: - return const0_rtx; - case IX86_BUILTIN_SQRTSD: return ix86_expand_unop1_builtin (CODE_FOR_sse2_vmsqrtv2df2, arglist, target); - case IX86_BUILTIN_LOADAPD: - return ix86_expand_unop_builtin (CODE_FOR_movv2df, arglist, target, 1); case IX86_BUILTIN_LOADUPD: return ix86_expand_unop_builtin (CODE_FOR_sse2_movupd, arglist, target, 1); - - case IX86_BUILTIN_STOREAPD: - return ix86_expand_store_builtin (CODE_FOR_movv2df, arglist); case IX86_BUILTIN_STOREUPD: return ix86_expand_store_builtin (CODE_FOR_sse2_movupd, arglist); - case IX86_BUILTIN_LOADSD: - return ix86_expand_unop_builtin (CODE_FOR_sse2_loadsd, arglist, target, 1); - - case IX86_BUILTIN_STORESD: - return ix86_expand_store_builtin (CODE_FOR_sse2_storelpd, arglist); - - case IX86_BUILTIN_SETPD1: - target = assign_386_stack_local (DFmode, 0); - arg0 = TREE_VALUE (arglist); - emit_move_insn (adjust_address (target, DFmode, 0), - expand_expr (arg0, NULL_RTX, VOIDmode, 0)); - op0 = gen_reg_rtx (V2DFmode); - emit_insn (gen_sse2_loadsd (op0, adjust_address (target, V2DFmode, 0))); - emit_insn (gen_sse2_shufpd (op0, op0, op0, const0_rtx)); - return op0; - - case IX86_BUILTIN_SETPD: - target = assign_386_stack_local (V2DFmode, 0); - arg0 = TREE_VALUE (arglist); - arg1 = TREE_VALUE (TREE_CHAIN (arglist)); - emit_move_insn (adjust_address (target, DFmode, 0), - expand_expr (arg0, NULL_RTX, VOIDmode, 0)); - emit_move_insn (adjust_address (target, DFmode, 8), - expand_expr 
(arg1, NULL_RTX, VOIDmode, 0)); - op0 = gen_reg_rtx (V2DFmode); - emit_move_insn (op0, target); - return op0; - - case IX86_BUILTIN_LOADRPD: - target = ix86_expand_unop_builtin (CODE_FOR_movv2df, arglist, - gen_reg_rtx (V2DFmode), 1); - emit_insn (gen_sse2_shufpd (target, target, target, const1_rtx)); - return target; - - case IX86_BUILTIN_LOADPD1: - target = ix86_expand_unop_builtin (CODE_FOR_sse2_loadsd, arglist, - gen_reg_rtx (V2DFmode), 1); - emit_insn (gen_sse2_shufpd (target, target, target, const0_rtx)); - return target; - - case IX86_BUILTIN_STOREPD1: - return ix86_expand_store_builtin (CODE_FOR_movv2df, arglist); - case IX86_BUILTIN_STORERPD: - return ix86_expand_store_builtin (CODE_FOR_movv2df, arglist); - - case IX86_BUILTIN_CLRPD: - return CONST0_RTX (V2DFmode); - case IX86_BUILTIN_MFENCE: emit_insn (gen_sse2_mfence ()); return 0; @@ -13972,15 +14397,11 @@ ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED, case IX86_BUILTIN_MOVNTI: return ix86_expand_store_builtin (CODE_FOR_sse2_movntsi, arglist); - case IX86_BUILTIN_LOADDQA: - return ix86_expand_unop_builtin (CODE_FOR_movv2di, arglist, target, 1); case IX86_BUILTIN_LOADDQU: return ix86_expand_unop_builtin (CODE_FOR_sse2_movdqu, arglist, target, 1); case IX86_BUILTIN_LOADD: return ix86_expand_unop_builtin (CODE_FOR_sse2_loadd, arglist, target, 1); - case IX86_BUILTIN_STOREDQA: - return ix86_expand_store_builtin (CODE_FOR_movv2di, arglist); case IX86_BUILTIN_STOREDQU: return ix86_expand_store_builtin (CODE_FOR_sse2_movdqu, arglist); case IX86_BUILTIN_STORED: @@ -14014,12 +14435,25 @@ ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED, emit_insn (gen_sse3_mwait (op0, op1)); return 0; - case IX86_BUILTIN_LOADDDUP: - return ix86_expand_unop_builtin (CODE_FOR_sse3_loadddup, arglist, target, 1); - case IX86_BUILTIN_LDDQU: - return ix86_expand_unop_builtin (CODE_FOR_sse3_lddqu, arglist, target, - 1); + return ix86_expand_unop_builtin (CODE_FOR_sse3_lddqu, arglist, + target, 1); + + case IX86_BUILTIN_VEC_INIT_V2SI: + case IX86_BUILTIN_VEC_INIT_V4HI: + case IX86_BUILTIN_VEC_INIT_V8QI: + return ix86_expand_vec_init_builtin (TREE_TYPE (exp), arglist, target); + + case IX86_BUILTIN_VEC_EXT_V2DF: + case IX86_BUILTIN_VEC_EXT_V2DI: + case IX86_BUILTIN_VEC_EXT_V4SF: + case IX86_BUILTIN_VEC_EXT_V8HI: + case IX86_BUILTIN_VEC_EXT_V4HI: + return ix86_expand_vec_ext_builtin (arglist, target); + + case IX86_BUILTIN_VEC_SET_V8HI: + case IX86_BUILTIN_VEC_SET_V4HI: + return ix86_expand_vec_set_builtin (arglist); default: break; @@ -15382,100 +15816,670 @@ x86_emit_floatuns (rtx operands[2]) emit_label (donelab); } + +/* A subroutine of ix86_expand_vector_init. Store into TARGET a vector + with all elements equal to VAR. Return true if successful. 
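For the narrow integer vectors handled below, the scalar is first replicated into the next wider mode with a shift and an OR, and the routine recurses on the wider vector mode. The replication step, modeled in plain C (a sketch only):

   /* Mirror of the widen: path: replicate an 8-bit value into 16
      bits, then 16 into 32, before recursing on the wider vector
      mode.  */
   static unsigned int
   replicate_qi_to_si (unsigned char __b)
   {
     unsigned short __h = (unsigned short) ((__b << 8) | __b);
     return ((unsigned int) __h << 16) | __h;
   }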
*/ + +static bool +ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode, + rtx target, rtx val) +{ + enum machine_mode smode, wsmode, wvmode; + rtx x; + + switch (mode) + { + case V2SImode: + case V2SFmode: + if (!mmx_ok && !TARGET_SSE) + return false; + /* FALLTHRU */ + + case V2DFmode: + case V2DImode: + case V4SFmode: + case V4SImode: + val = force_reg (GET_MODE_INNER (mode), val); + x = gen_rtx_VEC_DUPLICATE (mode, val); + emit_insn (gen_rtx_SET (VOIDmode, target, x)); + return true; + + case V4HImode: + if (!mmx_ok) + return false; + val = gen_lowpart (SImode, val); + x = gen_rtx_TRUNCATE (HImode, val); + x = gen_rtx_VEC_DUPLICATE (mode, x); + emit_insn (gen_rtx_SET (VOIDmode, target, x)); + return true; + + case V8QImode: + if (!mmx_ok) + return false; + smode = QImode; + wsmode = HImode; + wvmode = V4HImode; + goto widen; + case V8HImode: + smode = HImode; + wsmode = SImode; + wvmode = V4SImode; + goto widen; + case V16QImode: + smode = QImode; + wsmode = HImode; + wvmode = V8HImode; + goto widen; + widen: + /* Replicate the value once into the next wider mode and recurse. */ + val = convert_modes (wsmode, smode, val, true); + x = expand_simple_binop (wsmode, ASHIFT, val, + GEN_INT (GET_MODE_BITSIZE (smode)), + NULL_RTX, 1, OPTAB_LIB_WIDEN); + val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN); + + x = gen_reg_rtx (wvmode); + if (!ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val)) + gcc_unreachable (); + emit_move_insn (target, gen_lowpart (mode, x)); + return true; + + default: + return false; + } +} + +/* A subroutine of ix86_expand_vector_init. Store into TARGET a vector + whose low element is VAR, and other elements are zero. Return true + if successful. */ + +static bool +ix86_expand_vector_init_low_nonzero (bool mmx_ok, enum machine_mode mode, + rtx target, rtx var) +{ + enum machine_mode vsimode; + rtx x; + + switch (mode) + { + case V2SFmode: + case V2SImode: + if (!mmx_ok && !TARGET_SSE) + return false; + /* FALLTHRU */ + + case V2DFmode: + case V2DImode: + var = force_reg (GET_MODE_INNER (mode), var); + x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode))); + emit_insn (gen_rtx_SET (VOIDmode, target, x)); + return true; + + case V4SFmode: + case V4SImode: + var = force_reg (GET_MODE_INNER (mode), var); + x = gen_rtx_VEC_DUPLICATE (mode, var); + x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx); + emit_insn (gen_rtx_SET (VOIDmode, target, x)); + return true; + + case V8HImode: + case V16QImode: + vsimode = V4SImode; + goto widen; + case V4HImode: + case V8QImode: + if (!mmx_ok) + return false; + vsimode = V2SImode; + goto widen; + widen: + /* Zero extend the variable element to SImode and recurse. */ + var = convert_modes (SImode, GET_MODE_INNER (mode), var, true); + + x = gen_reg_rtx (vsimode); + if (!ix86_expand_vector_init_low_nonzero (mmx_ok, vsimode, x, var)) + gcc_unreachable (); + + emit_move_insn (target, gen_lowpart (mode, x)); + return true; + + default: + return false; + } +} + +/* A subroutine of ix86_expand_vector_init. Store into TARGET a vector + consisting of the values in VALS. It is known that all elements + except ONE_VAR are constants. Return true if successful. 
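A user-level trigger for this helper, written with the GCC vector extension (the typedef and function name are illustrative):

   typedef int __v4si_t __attribute__ ((vector_size (16)));

   __v4si_t
   mostly_constant (int __x)
   {
     /* Three constant elements, one variable: the pool vector
        { 1, 2, 3, 0 } is loaded and element 3 is then overwritten
        via ix86_expand_vector_set.  */
     return (__v4si_t) { 1, 2, 3, __x };
   }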
*/ + +static bool +ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode, + rtx target, rtx vals, int one_var) +{ + rtx var = XVECEXP (vals, 0, one_var); + enum machine_mode wmode; + rtx const_vec, x; + + XVECEXP (vals, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode)); + const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)); + + switch (mode) + { + case V2DFmode: + case V2DImode: + case V2SFmode: + case V2SImode: + /* For the two element vectors, it's just as easy to use + the general case. */ + return false; + + case V4SFmode: + case V4SImode: + case V8HImode: + case V4HImode: + break; + + case V16QImode: + wmode = V8HImode; + goto widen; + case V8QImode: + wmode = V4HImode; + goto widen; + widen: + /* There's no way to set one QImode entry easily. Combine + the variable value with its adjacent constant value, and + promote to an HImode set. */ + x = XVECEXP (vals, 0, one_var ^ 1); + if (one_var & 1) + { + var = convert_modes (HImode, QImode, var, true); + var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8), + NULL_RTX, 1, OPTAB_LIB_WIDEN); + x = GEN_INT (INTVAL (x) & 0xff); + } + else + { + var = convert_modes (HImode, QImode, var, true); + x = gen_int_mode (INTVAL (x) << 8, HImode); + } + if (x != const0_rtx) + var = expand_simple_binop (HImode, IOR, var, x, var, + 1, OPTAB_LIB_WIDEN); + + x = gen_reg_rtx (wmode); + emit_move_insn (x, gen_lowpart (wmode, const_vec)); + ix86_expand_vector_set (mmx_ok, target, var, one_var >> 1); + + emit_move_insn (target, gen_lowpart (mode, x)); + return true; + + default: + return false; + } + + emit_move_insn (target, const_vec); + ix86_expand_vector_set (mmx_ok, target, var, one_var); + return true; +} + +/* A subroutine of ix86_expand_vector_init. Handle the most general case: + all values variable, and none identical. */ + +static void +ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode, + rtx target, rtx vals) +{ + enum machine_mode half_mode = GET_MODE_INNER (mode); + rtx op0 = NULL, op1 = NULL; + bool use_vec_concat = false; + + switch (mode) + { + case V2SFmode: + case V2SImode: + if (!mmx_ok && !TARGET_SSE) + break; + /* FALLTHRU */ + + case V2DFmode: + case V2DImode: + /* For the two element vectors, we always implement VEC_CONCAT. */ + op0 = XVECEXP (vals, 0, 0); + op1 = XVECEXP (vals, 0, 1); + use_vec_concat = true; + break; + + case V4SFmode: + half_mode = V2SFmode; + goto half; + case V4SImode: + half_mode = V2SImode; + goto half; + half: + { + rtvec v; + + /* For V4SF and V4SI, we implement a concat of two V2 vectors. + Recurse to load the two halves. 
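A fully variable V4SF initializer exercises this path: both halves recurse to the V2SF concat patterns added to sse.md below, and the result is joined by one more VEC_CONCAT. A sketch (typedef and name illustrative):

   typedef float __v4sf_t __attribute__ ((vector_size (16)));

   __v4sf_t
   all_variable (float __a, float __b, float __c, float __d)
   {
     /* Two V2SF halves built recursively, then joined with a
        single VEC_CONCAT.  */
     return (__v4sf_t) { __a, __b, __c, __d };
   }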
*/ + + op0 = gen_reg_rtx (half_mode); + v = gen_rtvec (2, XVECEXP (vals, 0, 0), XVECEXP (vals, 0, 1)); + ix86_expand_vector_init (false, op0, gen_rtx_PARALLEL (half_mode, v)); + + op1 = gen_reg_rtx (half_mode); + v = gen_rtvec (2, XVECEXP (vals, 0, 2), XVECEXP (vals, 0, 3)); + ix86_expand_vector_init (false, op1, gen_rtx_PARALLEL (half_mode, v)); + + use_vec_concat = true; + } + break; + + case V8HImode: + case V16QImode: + case V4HImode: + case V8QImode: + break; + + default: + gcc_unreachable (); + } + + if (use_vec_concat) + { + if (!register_operand (op0, half_mode)) + op0 = force_reg (half_mode, op0); + if (!register_operand (op1, half_mode)) + op1 = force_reg (half_mode, op1); + + emit_insn (gen_rtx_SET (VOIDmode, target, + gen_rtx_VEC_CONCAT (mode, op0, op1))); + } + else + { + int i, j, n_elts, n_words, n_elt_per_word; + enum machine_mode inner_mode; + rtx words[4], shift; + + inner_mode = GET_MODE_INNER (mode); + n_elts = GET_MODE_NUNITS (mode); + n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD; + n_elt_per_word = n_elts / n_words; + shift = GEN_INT (GET_MODE_BITSIZE (inner_mode)); + + for (i = 0; i < n_words; ++i) + { + rtx word = NULL_RTX; + + for (j = 0; j < n_elt_per_word; ++j) + { + rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1); + elt = convert_modes (word_mode, inner_mode, elt, true); + + if (j == 0) + word = elt; + else + { + word = expand_simple_binop (word_mode, ASHIFT, word, shift, + word, 1, OPTAB_LIB_WIDEN); + word = expand_simple_binop (word_mode, IOR, word, elt, + word, 1, OPTAB_LIB_WIDEN); + } + } + + words[i] = word; + } + + if (n_words == 1) + emit_move_insn (target, gen_lowpart (mode, words[0])); + else if (n_words == 2) + { + rtx tmp = gen_reg_rtx (mode); + emit_insn (gen_rtx_CLOBBER (VOIDmode, tmp)); + emit_move_insn (gen_lowpart (word_mode, tmp), words[0]); + emit_move_insn (gen_highpart (word_mode, tmp), words[1]); + emit_move_insn (target, tmp); + } + else if (n_words == 4) + { + rtx tmp = gen_reg_rtx (V4SImode); + vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words)); + ix86_expand_vector_init_general (false, V4SImode, target, vals); + emit_move_insn (target, gen_lowpart (mode, tmp)); + } + else + gcc_unreachable (); + } +} + +/* Initialize vector TARGET via VALS. Suppress the use of MMX + instructions unless MMX_OK is true. */ -/* Initialize vector TARGET via VALS. */ void -ix86_expand_vector_init (rtx target, rtx vals) +ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals) { enum machine_mode mode = GET_MODE (target); - int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode)); - int n_elts = (GET_MODE_SIZE (mode) / elt_size); + enum machine_mode inner_mode = GET_MODE_INNER (mode); + int n_elts = GET_MODE_NUNITS (mode); + int n_var = 0, one_var = -1; + bool all_same = true, all_const_zero = true; int i; + rtx x; - for (i = n_elts - 1; i >= 0; i--) - if (GET_CODE (XVECEXP (vals, 0, i)) != CONST_INT - && GET_CODE (XVECEXP (vals, 0, i)) != CONST_DOUBLE) - break; + for (i = 0; i < n_elts; ++i) + { + x = XVECEXP (vals, 0, i); + if (!CONSTANT_P (x)) + n_var++, one_var = i; + else if (x != CONST0_RTX (inner_mode)) + all_const_zero = false; + if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0))) + all_same = false; + } - /* Few special cases first... - ... constants are best loaded from constant pool. */ - if (i < 0) + /* Constants are best loaded from the constant pool. */ + if (n_var == 0) { emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0))); return; } - /* ... 
values where only first field is non-constant are best loaded - from the pool and overwritten via move later. */ - if (i == 0) + /* If all values are identical, broadcast the value. */ + if (all_same + && ix86_expand_vector_init_duplicate (mmx_ok, mode, target, + XVECEXP (vals, 0, 0))) + return; + + /* Values where only one field is non-constant are best loaded from + the pool and overwritten via move later. */ + if (n_var == 1) { - XVECEXP (vals, 0, 0) = CONST0_RTX (GET_MODE_INNER (mode)); - emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0))); + if (all_const_zero && one_var == 0 + && ix86_expand_vector_init_low_nonzero (mmx_ok, mode, target, + XVECEXP (vals, 0, 0))) + return; + + if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var)) + return; + } + + ix86_expand_vector_init_general (mmx_ok, mode, target, vals); +} + +void +ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt) +{ + enum machine_mode mode = GET_MODE (target); + enum machine_mode inner_mode = GET_MODE_INNER (mode); + bool use_vec_merge = false; + rtx tmp; + + switch (mode) + { + case V2SFmode: + case V2SImode: + if (!mmx_ok) + break; + /* FALLTHRU */ + + case V2DFmode: + case V2DImode: + { + rtx op0, op1; + + /* For the two element vectors, we implement a VEC_CONCAT with + the extraction of the other element. */ + + tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt))); + tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp); + + if (elt == 0) + op0 = val, op1 = tmp; + else + op0 = tmp, op1 = val; + + tmp = gen_rtx_VEC_CONCAT (mode, op0, op1); + emit_insn (gen_rtx_SET (VOIDmode, target, tmp)); + } + return; - switch (GET_MODE (target)) + case V4SFmode: + switch (elt) { - case V2DFmode: - emit_insn (gen_sse2_loadlpd (target, target, XVECEXP (vals, 0, 0))); + case 0: + use_vec_merge = true; break; - case V4SFmode: - { - /* ??? We can represent this better. */ - rtx op = simplify_gen_subreg (mode, XVECEXP (vals, 0, 0), - GET_MODE_INNER (mode), 0); - op = force_reg (mode, op); - emit_insn (gen_sse_movss (target, target, op)); - } - break; + case 1: + /* tmp = op0 = A B C D */ + tmp = copy_to_reg (target); + + /* op0 = C C D D */ + emit_insn (gen_sse_unpcklps (target, target, target)); + + /* op0 = C C D X */ + ix86_expand_vector_set (false, target, val, 0); + + /* op0 = A B X D */ + emit_insn (gen_sse_shufps_1 (target, target, tmp, + GEN_INT (1), GEN_INT (0), + GEN_INT (2), GEN_INT (3))); + return; + + case 2: + tmp = copy_to_reg (target); + ix86_expand_vector_set (false, target, val, 0); + emit_insn (gen_sse_shufps_1 (target, target, tmp, + GEN_INT (0), GEN_INT (1), + GEN_INT (0), GEN_INT (3))); + return; + + case 3: + tmp = copy_to_reg (target); + ix86_expand_vector_set (false, target, val, 0); + emit_insn (gen_sse_shufps_1 (target, target, tmp, + GEN_INT (0), GEN_INT (1), + GEN_INT (2), GEN_INT (0))); + return; default: + gcc_unreachable (); + } + break; + + case V4SImode: + /* Element 0 handled by vec_merge below. */ + if (elt == 0) + { + use_vec_merge = true; break; } + + if (TARGET_SSE2) + { + /* With SSE2, use integer shuffles to swap element 0 and ELT, + store into element 0, then shuffle them back. 
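The shuffle selector {ELT, 1, 2, 3} with order[ELT] = 0 swaps lanes 0 and ELT and leaves the rest alone, so it is its own inverse; applying it again after the lane-0 store lands the new value in lane ELT and restores lane 0. Modeled on scalars (a sketch):

   /* Scalar model of the SSE2 swap/store/swap sequence for
      V4SImode element insertion.  */
   static void
   set_lane (int __v[4], int __val, int __elt)
   {
     int __t = __v[0];
     __v[0] = __v[__elt], __v[__elt] = __t;  /* first pshufd */
     __v[0] = __val;                         /* insert at lane 0 */
     __t = __v[0];
     __v[0] = __v[__elt], __v[__elt] = __t;  /* second pshufd */
   }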
*/ + + rtx order[4]; + + order[0] = GEN_INT (elt); + order[1] = const1_rtx; + order[2] = const2_rtx; + order[3] = GEN_INT (3); + order[elt] = const0_rtx; + + emit_insn (gen_sse2_pshufd_1 (target, target, order[0], + order[1], order[2], order[3])); + + ix86_expand_vector_set (false, target, val, 0); + + emit_insn (gen_sse2_pshufd_1 (target, target, order[0], + order[1], order[2], order[3])); + } + else + { + /* For SSE1, we have to reuse the V4SF code. */ + ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target), + gen_lowpart (SFmode, val), elt); + } return; + + case V8HImode: + use_vec_merge = TARGET_SSE2; + break; + case V4HImode: + use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A); + break; + + case V16QImode: + case V8QImode: + default: + break; } - /* And the busy sequence doing rotations. */ - switch (GET_MODE (target)) + if (use_vec_merge) { - case V2DFmode: + tmp = gen_rtx_VEC_DUPLICATE (mode, val); + tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt)); + emit_insn (gen_rtx_SET (VOIDmode, target, tmp)); + } + else + { + rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false); + + emit_move_insn (mem, target); + + tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode)); + emit_move_insn (tmp, val); + + emit_move_insn (target, mem); + } +} + +void +ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt) +{ + enum machine_mode mode = GET_MODE (vec); + enum machine_mode inner_mode = GET_MODE_INNER (mode); + bool use_vec_extr = false; + rtx tmp; + + switch (mode) + { + case V2SImode: + case V2SFmode: + if (!mmx_ok) + break; + /* FALLTHRU */ + + case V2DFmode: + case V2DImode: + use_vec_extr = true; + break; + + case V4SFmode: + switch (elt) { - rtx vecop0 = - simplify_gen_subreg (V2DFmode, XVECEXP (vals, 0, 0), DFmode, 0); - rtx vecop1 = - simplify_gen_subreg (V2DFmode, XVECEXP (vals, 0, 1), DFmode, 0); + case 0: + tmp = vec; + break; - vecop0 = force_reg (V2DFmode, vecop0); - vecop1 = force_reg (V2DFmode, vecop1); - emit_insn (gen_sse2_unpcklpd (target, vecop0, vecop1)); + case 1: + case 3: + tmp = gen_reg_rtx (mode); + emit_insn (gen_sse_shufps_1 (tmp, vec, vec, + GEN_INT (elt), GEN_INT (elt), + GEN_INT (elt), GEN_INT (elt))); + break; + + case 2: + tmp = gen_reg_rtx (mode); + emit_insn (gen_sse_unpckhps (tmp, vec, vec)); + break; + + default: + gcc_unreachable (); } - break; - case V4SFmode: + vec = tmp; + use_vec_extr = true; + break; + + case V4SImode: + if (TARGET_SSE2) { - rtx vecop0 = - simplify_gen_subreg (V4SFmode, XVECEXP (vals, 0, 0), SFmode, 0); - rtx vecop1 = - simplify_gen_subreg (V4SFmode, XVECEXP (vals, 0, 1), SFmode, 0); - rtx vecop2 = - simplify_gen_subreg (V4SFmode, XVECEXP (vals, 0, 2), SFmode, 0); - rtx vecop3 = - simplify_gen_subreg (V4SFmode, XVECEXP (vals, 0, 3), SFmode, 0); - rtx tmp1 = gen_reg_rtx (V4SFmode); - rtx tmp2 = gen_reg_rtx (V4SFmode); - - vecop0 = force_reg (V4SFmode, vecop0); - vecop1 = force_reg (V4SFmode, vecop1); - vecop2 = force_reg (V4SFmode, vecop2); - vecop3 = force_reg (V4SFmode, vecop3); - emit_insn (gen_sse_unpcklps (tmp1, vecop1, vecop3)); - emit_insn (gen_sse_unpcklps (tmp2, vecop0, vecop2)); - emit_insn (gen_sse_unpcklps (target, tmp2, tmp1)); + switch (elt) + { + case 0: + tmp = vec; + break; + + case 1: + case 3: + tmp = gen_reg_rtx (mode); + emit_insn (gen_sse2_pshufd_1 (tmp, vec, + GEN_INT (elt), GEN_INT (elt), + GEN_INT (elt), GEN_INT (elt))); + break; + + case 2: + tmp = gen_reg_rtx (mode); + emit_insn (gen_sse2_punpckhdq (tmp, vec, vec)); + break; + + default: + 
gcc_unreachable (); + } + vec = tmp; + use_vec_extr = true; } - break; - default: - abort (); + else + { + /* For SSE1, we have to reuse the V4SF code. */ + ix86_expand_vector_extract (false, gen_lowpart (SFmode, target), + gen_lowpart (V4SFmode, vec), elt); + return; + } + break; + + case V8HImode: + use_vec_extr = TARGET_SSE2; + break; + case V4HImode: + use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A); + break; + + case V16QImode: + case V8QImode: + /* ??? Could extract the appropriate HImode element and shift. */ + default: + break; } -} + if (use_vec_extr) + { + tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt))); + tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp); + + /* Let the rtl optimizers know about the zero extension performed. */ + if (inner_mode == HImode) + { + tmp = gen_rtx_ZERO_EXTEND (SImode, tmp); + target = gen_lowpart (SImode, target); + } + + emit_insn (gen_rtx_SET (VOIDmode, target, tmp)); + } + else + { + rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false); + + emit_move_insn (mem, vec); + + tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode)); + emit_move_insn (target, tmp); + } +} + /* Implements target hook vector_mode_supported_p. */ static bool ix86_vector_mode_supported_p (enum machine_mode mode) diff --git a/gcc/config/i386/i386.h b/gcc/config/i386/i386.h index 0a0db2e..d359fd4c 100644 --- a/gcc/config/i386/i386.h +++ b/gcc/config/i386/i386.h @@ -2031,464 +2031,6 @@ do { \ goto LABEL; \ } while (0) -/* Codes for all the SSE/MMX builtins. */ -enum ix86_builtins -{ - IX86_BUILTIN_ADDPS, - IX86_BUILTIN_ADDSS, - IX86_BUILTIN_DIVPS, - IX86_BUILTIN_DIVSS, - IX86_BUILTIN_MULPS, - IX86_BUILTIN_MULSS, - IX86_BUILTIN_SUBPS, - IX86_BUILTIN_SUBSS, - - IX86_BUILTIN_CMPEQPS, - IX86_BUILTIN_CMPLTPS, - IX86_BUILTIN_CMPLEPS, - IX86_BUILTIN_CMPGTPS, - IX86_BUILTIN_CMPGEPS, - IX86_BUILTIN_CMPNEQPS, - IX86_BUILTIN_CMPNLTPS, - IX86_BUILTIN_CMPNLEPS, - IX86_BUILTIN_CMPNGTPS, - IX86_BUILTIN_CMPNGEPS, - IX86_BUILTIN_CMPORDPS, - IX86_BUILTIN_CMPUNORDPS, - IX86_BUILTIN_CMPNEPS, - IX86_BUILTIN_CMPEQSS, - IX86_BUILTIN_CMPLTSS, - IX86_BUILTIN_CMPLESS, - IX86_BUILTIN_CMPNEQSS, - IX86_BUILTIN_CMPNLTSS, - IX86_BUILTIN_CMPNLESS, - IX86_BUILTIN_CMPNGTSS, - IX86_BUILTIN_CMPNGESS, - IX86_BUILTIN_CMPORDSS, - IX86_BUILTIN_CMPUNORDSS, - IX86_BUILTIN_CMPNESS, - - IX86_BUILTIN_COMIEQSS, - IX86_BUILTIN_COMILTSS, - IX86_BUILTIN_COMILESS, - IX86_BUILTIN_COMIGTSS, - IX86_BUILTIN_COMIGESS, - IX86_BUILTIN_COMINEQSS, - IX86_BUILTIN_UCOMIEQSS, - IX86_BUILTIN_UCOMILTSS, - IX86_BUILTIN_UCOMILESS, - IX86_BUILTIN_UCOMIGTSS, - IX86_BUILTIN_UCOMIGESS, - IX86_BUILTIN_UCOMINEQSS, - - IX86_BUILTIN_CVTPI2PS, - IX86_BUILTIN_CVTPS2PI, - IX86_BUILTIN_CVTSI2SS, - IX86_BUILTIN_CVTSI642SS, - IX86_BUILTIN_CVTSS2SI, - IX86_BUILTIN_CVTSS2SI64, - IX86_BUILTIN_CVTTPS2PI, - IX86_BUILTIN_CVTTSS2SI, - IX86_BUILTIN_CVTTSS2SI64, - - IX86_BUILTIN_MAXPS, - IX86_BUILTIN_MAXSS, - IX86_BUILTIN_MINPS, - IX86_BUILTIN_MINSS, - - IX86_BUILTIN_LOADAPS, - IX86_BUILTIN_LOADUPS, - IX86_BUILTIN_STOREAPS, - IX86_BUILTIN_STOREUPS, - IX86_BUILTIN_LOADSS, - IX86_BUILTIN_STORESS, - IX86_BUILTIN_MOVSS, - - IX86_BUILTIN_MOVHLPS, - IX86_BUILTIN_MOVLHPS, - IX86_BUILTIN_LOADHPS, - IX86_BUILTIN_LOADLPS, - IX86_BUILTIN_STOREHPS, - IX86_BUILTIN_STORELPS, - - IX86_BUILTIN_MASKMOVQ, - IX86_BUILTIN_MOVMSKPS, - IX86_BUILTIN_PMOVMSKB, - - IX86_BUILTIN_MOVNTPS, - IX86_BUILTIN_MOVNTQ, - - IX86_BUILTIN_LOADDQA, - IX86_BUILTIN_LOADDQU, - IX86_BUILTIN_STOREDQA, - IX86_BUILTIN_STOREDQU, - IX86_BUILTIN_MOVQ, - 
IX86_BUILTIN_LOADD, - IX86_BUILTIN_STORED, - - IX86_BUILTIN_CLRTI, - - IX86_BUILTIN_PACKSSWB, - IX86_BUILTIN_PACKSSDW, - IX86_BUILTIN_PACKUSWB, - - IX86_BUILTIN_PADDB, - IX86_BUILTIN_PADDW, - IX86_BUILTIN_PADDD, - IX86_BUILTIN_PADDQ, - IX86_BUILTIN_PADDSB, - IX86_BUILTIN_PADDSW, - IX86_BUILTIN_PADDUSB, - IX86_BUILTIN_PADDUSW, - IX86_BUILTIN_PSUBB, - IX86_BUILTIN_PSUBW, - IX86_BUILTIN_PSUBD, - IX86_BUILTIN_PSUBQ, - IX86_BUILTIN_PSUBSB, - IX86_BUILTIN_PSUBSW, - IX86_BUILTIN_PSUBUSB, - IX86_BUILTIN_PSUBUSW, - - IX86_BUILTIN_PAND, - IX86_BUILTIN_PANDN, - IX86_BUILTIN_POR, - IX86_BUILTIN_PXOR, - - IX86_BUILTIN_PAVGB, - IX86_BUILTIN_PAVGW, - - IX86_BUILTIN_PCMPEQB, - IX86_BUILTIN_PCMPEQW, - IX86_BUILTIN_PCMPEQD, - IX86_BUILTIN_PCMPGTB, - IX86_BUILTIN_PCMPGTW, - IX86_BUILTIN_PCMPGTD, - - IX86_BUILTIN_PEXTRW, - IX86_BUILTIN_PINSRW, - - IX86_BUILTIN_PMADDWD, - - IX86_BUILTIN_PMAXSW, - IX86_BUILTIN_PMAXUB, - IX86_BUILTIN_PMINSW, - IX86_BUILTIN_PMINUB, - - IX86_BUILTIN_PMULHUW, - IX86_BUILTIN_PMULHW, - IX86_BUILTIN_PMULLW, - - IX86_BUILTIN_PSADBW, - IX86_BUILTIN_PSHUFW, - - IX86_BUILTIN_PSLLW, - IX86_BUILTIN_PSLLD, - IX86_BUILTIN_PSLLQ, - IX86_BUILTIN_PSRAW, - IX86_BUILTIN_PSRAD, - IX86_BUILTIN_PSRLW, - IX86_BUILTIN_PSRLD, - IX86_BUILTIN_PSRLQ, - IX86_BUILTIN_PSLLWI, - IX86_BUILTIN_PSLLDI, - IX86_BUILTIN_PSLLQI, - IX86_BUILTIN_PSRAWI, - IX86_BUILTIN_PSRADI, - IX86_BUILTIN_PSRLWI, - IX86_BUILTIN_PSRLDI, - IX86_BUILTIN_PSRLQI, - - IX86_BUILTIN_PUNPCKHBW, - IX86_BUILTIN_PUNPCKHWD, - IX86_BUILTIN_PUNPCKHDQ, - IX86_BUILTIN_PUNPCKLBW, - IX86_BUILTIN_PUNPCKLWD, - IX86_BUILTIN_PUNPCKLDQ, - - IX86_BUILTIN_SHUFPS, - - IX86_BUILTIN_RCPPS, - IX86_BUILTIN_RCPSS, - IX86_BUILTIN_RSQRTPS, - IX86_BUILTIN_RSQRTSS, - IX86_BUILTIN_SQRTPS, - IX86_BUILTIN_SQRTSS, - - IX86_BUILTIN_UNPCKHPS, - IX86_BUILTIN_UNPCKLPS, - - IX86_BUILTIN_ANDPS, - IX86_BUILTIN_ANDNPS, - IX86_BUILTIN_ORPS, - IX86_BUILTIN_XORPS, - - IX86_BUILTIN_EMMS, - IX86_BUILTIN_LDMXCSR, - IX86_BUILTIN_STMXCSR, - IX86_BUILTIN_SFENCE, - - /* 3DNow! Original */ - IX86_BUILTIN_FEMMS, - IX86_BUILTIN_PAVGUSB, - IX86_BUILTIN_PF2ID, - IX86_BUILTIN_PFACC, - IX86_BUILTIN_PFADD, - IX86_BUILTIN_PFCMPEQ, - IX86_BUILTIN_PFCMPGE, - IX86_BUILTIN_PFCMPGT, - IX86_BUILTIN_PFMAX, - IX86_BUILTIN_PFMIN, - IX86_BUILTIN_PFMUL, - IX86_BUILTIN_PFRCP, - IX86_BUILTIN_PFRCPIT1, - IX86_BUILTIN_PFRCPIT2, - IX86_BUILTIN_PFRSQIT1, - IX86_BUILTIN_PFRSQRT, - IX86_BUILTIN_PFSUB, - IX86_BUILTIN_PFSUBR, - IX86_BUILTIN_PI2FD, - IX86_BUILTIN_PMULHRW, - - /* 3DNow! 
Athlon Extensions */ - IX86_BUILTIN_PF2IW, - IX86_BUILTIN_PFNACC, - IX86_BUILTIN_PFPNACC, - IX86_BUILTIN_PI2FW, - IX86_BUILTIN_PSWAPDSI, - IX86_BUILTIN_PSWAPDSF, - - IX86_BUILTIN_SSE_ZERO, - IX86_BUILTIN_MMX_ZERO, - - /* SSE2 */ - IX86_BUILTIN_ADDPD, - IX86_BUILTIN_ADDSD, - IX86_BUILTIN_DIVPD, - IX86_BUILTIN_DIVSD, - IX86_BUILTIN_MULPD, - IX86_BUILTIN_MULSD, - IX86_BUILTIN_SUBPD, - IX86_BUILTIN_SUBSD, - - IX86_BUILTIN_CMPEQPD, - IX86_BUILTIN_CMPLTPD, - IX86_BUILTIN_CMPLEPD, - IX86_BUILTIN_CMPGTPD, - IX86_BUILTIN_CMPGEPD, - IX86_BUILTIN_CMPNEQPD, - IX86_BUILTIN_CMPNLTPD, - IX86_BUILTIN_CMPNLEPD, - IX86_BUILTIN_CMPNGTPD, - IX86_BUILTIN_CMPNGEPD, - IX86_BUILTIN_CMPORDPD, - IX86_BUILTIN_CMPUNORDPD, - IX86_BUILTIN_CMPNEPD, - IX86_BUILTIN_CMPEQSD, - IX86_BUILTIN_CMPLTSD, - IX86_BUILTIN_CMPLESD, - IX86_BUILTIN_CMPNEQSD, - IX86_BUILTIN_CMPNLTSD, - IX86_BUILTIN_CMPNLESD, - IX86_BUILTIN_CMPORDSD, - IX86_BUILTIN_CMPUNORDSD, - IX86_BUILTIN_CMPNESD, - - IX86_BUILTIN_COMIEQSD, - IX86_BUILTIN_COMILTSD, - IX86_BUILTIN_COMILESD, - IX86_BUILTIN_COMIGTSD, - IX86_BUILTIN_COMIGESD, - IX86_BUILTIN_COMINEQSD, - IX86_BUILTIN_UCOMIEQSD, - IX86_BUILTIN_UCOMILTSD, - IX86_BUILTIN_UCOMILESD, - IX86_BUILTIN_UCOMIGTSD, - IX86_BUILTIN_UCOMIGESD, - IX86_BUILTIN_UCOMINEQSD, - - IX86_BUILTIN_MAXPD, - IX86_BUILTIN_MAXSD, - IX86_BUILTIN_MINPD, - IX86_BUILTIN_MINSD, - - IX86_BUILTIN_ANDPD, - IX86_BUILTIN_ANDNPD, - IX86_BUILTIN_ORPD, - IX86_BUILTIN_XORPD, - - IX86_BUILTIN_SQRTPD, - IX86_BUILTIN_SQRTSD, - - IX86_BUILTIN_UNPCKHPD, - IX86_BUILTIN_UNPCKLPD, - - IX86_BUILTIN_SHUFPD, - - IX86_BUILTIN_LOADAPD, - IX86_BUILTIN_LOADUPD, - IX86_BUILTIN_STOREAPD, - IX86_BUILTIN_STOREUPD, - IX86_BUILTIN_LOADSD, - IX86_BUILTIN_STORESD, - IX86_BUILTIN_MOVSD, - - IX86_BUILTIN_LOADHPD, - IX86_BUILTIN_LOADLPD, - IX86_BUILTIN_STOREHPD, - IX86_BUILTIN_STORELPD, - - IX86_BUILTIN_CVTDQ2PD, - IX86_BUILTIN_CVTDQ2PS, - - IX86_BUILTIN_CVTPD2DQ, - IX86_BUILTIN_CVTPD2PI, - IX86_BUILTIN_CVTPD2PS, - IX86_BUILTIN_CVTTPD2DQ, - IX86_BUILTIN_CVTTPD2PI, - - IX86_BUILTIN_CVTPI2PD, - IX86_BUILTIN_CVTSI2SD, - IX86_BUILTIN_CVTSI642SD, - - IX86_BUILTIN_CVTSD2SI, - IX86_BUILTIN_CVTSD2SI64, - IX86_BUILTIN_CVTSD2SS, - IX86_BUILTIN_CVTSS2SD, - IX86_BUILTIN_CVTTSD2SI, - IX86_BUILTIN_CVTTSD2SI64, - - IX86_BUILTIN_CVTPS2DQ, - IX86_BUILTIN_CVTPS2PD, - IX86_BUILTIN_CVTTPS2DQ, - - IX86_BUILTIN_MOVNTI, - IX86_BUILTIN_MOVNTPD, - IX86_BUILTIN_MOVNTDQ, - - IX86_BUILTIN_SETPD1, - IX86_BUILTIN_SETPD, - IX86_BUILTIN_CLRPD, - IX86_BUILTIN_SETRPD, - IX86_BUILTIN_LOADPD1, - IX86_BUILTIN_LOADRPD, - IX86_BUILTIN_STOREPD1, - IX86_BUILTIN_STORERPD, - - /* SSE2 MMX */ - IX86_BUILTIN_MASKMOVDQU, - IX86_BUILTIN_MOVMSKPD, - IX86_BUILTIN_PMOVMSKB128, - IX86_BUILTIN_MOVQ2DQ, - IX86_BUILTIN_MOVDQ2Q, - - IX86_BUILTIN_PACKSSWB128, - IX86_BUILTIN_PACKSSDW128, - IX86_BUILTIN_PACKUSWB128, - - IX86_BUILTIN_PADDB128, - IX86_BUILTIN_PADDW128, - IX86_BUILTIN_PADDD128, - IX86_BUILTIN_PADDQ128, - IX86_BUILTIN_PADDSB128, - IX86_BUILTIN_PADDSW128, - IX86_BUILTIN_PADDUSB128, - IX86_BUILTIN_PADDUSW128, - IX86_BUILTIN_PSUBB128, - IX86_BUILTIN_PSUBW128, - IX86_BUILTIN_PSUBD128, - IX86_BUILTIN_PSUBQ128, - IX86_BUILTIN_PSUBSB128, - IX86_BUILTIN_PSUBSW128, - IX86_BUILTIN_PSUBUSB128, - IX86_BUILTIN_PSUBUSW128, - - IX86_BUILTIN_PAND128, - IX86_BUILTIN_PANDN128, - IX86_BUILTIN_POR128, - IX86_BUILTIN_PXOR128, - - IX86_BUILTIN_PAVGB128, - IX86_BUILTIN_PAVGW128, - - IX86_BUILTIN_PCMPEQB128, - IX86_BUILTIN_PCMPEQW128, - IX86_BUILTIN_PCMPEQD128, - IX86_BUILTIN_PCMPGTB128, - IX86_BUILTIN_PCMPGTW128, - IX86_BUILTIN_PCMPGTD128, 
- - IX86_BUILTIN_PEXTRW128, - IX86_BUILTIN_PINSRW128, - - IX86_BUILTIN_PMADDWD128, - - IX86_BUILTIN_PMAXSW128, - IX86_BUILTIN_PMAXUB128, - IX86_BUILTIN_PMINSW128, - IX86_BUILTIN_PMINUB128, - - IX86_BUILTIN_PMULUDQ, - IX86_BUILTIN_PMULUDQ128, - IX86_BUILTIN_PMULHUW128, - IX86_BUILTIN_PMULHW128, - IX86_BUILTIN_PMULLW128, - - IX86_BUILTIN_PSADBW128, - IX86_BUILTIN_PSHUFHW, - IX86_BUILTIN_PSHUFLW, - IX86_BUILTIN_PSHUFD, - - IX86_BUILTIN_PSLLW128, - IX86_BUILTIN_PSLLD128, - IX86_BUILTIN_PSLLQ128, - IX86_BUILTIN_PSRAW128, - IX86_BUILTIN_PSRAD128, - IX86_BUILTIN_PSRLW128, - IX86_BUILTIN_PSRLD128, - IX86_BUILTIN_PSRLQ128, - IX86_BUILTIN_PSLLDQI128, - IX86_BUILTIN_PSLLWI128, - IX86_BUILTIN_PSLLDI128, - IX86_BUILTIN_PSLLQI128, - IX86_BUILTIN_PSRAWI128, - IX86_BUILTIN_PSRADI128, - IX86_BUILTIN_PSRLDQI128, - IX86_BUILTIN_PSRLWI128, - IX86_BUILTIN_PSRLDI128, - IX86_BUILTIN_PSRLQI128, - - IX86_BUILTIN_PUNPCKHBW128, - IX86_BUILTIN_PUNPCKHWD128, - IX86_BUILTIN_PUNPCKHDQ128, - IX86_BUILTIN_PUNPCKHQDQ128, - IX86_BUILTIN_PUNPCKLBW128, - IX86_BUILTIN_PUNPCKLWD128, - IX86_BUILTIN_PUNPCKLDQ128, - IX86_BUILTIN_PUNPCKLQDQ128, - - IX86_BUILTIN_CLFLUSH, - IX86_BUILTIN_MFENCE, - IX86_BUILTIN_LFENCE, - - /* Prescott New Instructions. */ - IX86_BUILTIN_ADDSUBPS, - IX86_BUILTIN_HADDPS, - IX86_BUILTIN_HSUBPS, - IX86_BUILTIN_MOVSHDUP, - IX86_BUILTIN_MOVSLDUP, - IX86_BUILTIN_ADDSUBPD, - IX86_BUILTIN_HADDPD, - IX86_BUILTIN_HSUBPD, - IX86_BUILTIN_LOADDDUP, - IX86_BUILTIN_MOVDDUP, - IX86_BUILTIN_LDDQU, - - IX86_BUILTIN_MONITOR, - IX86_BUILTIN_MWAIT, - - IX86_BUILTIN_MAX -}; - /* Max number of args passed in registers. If this is more than 3, we will have problems with ebx (register #4), since it is a caller save register and is also used as the pic register in ELF. So for now, don't allow more than diff --git a/gcc/config/i386/mmintrin.h b/gcc/config/i386/mmintrin.h index ab0eb85..4f2af6d 100644 --- a/gcc/config/i386/mmintrin.h +++ b/gcc/config/i386/mmintrin.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc. +/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc. This file is part of GCC. @@ -818,35 +818,21 @@ _m_pcmpgtd (__m64 __m1, __m64 __m2) static __inline __m64 _mm_setzero_si64 (void) { - return (__m64)__builtin_ia32_mmx_zero (); + return (__m64)0LL; } /* Creates a vector of two 32-bit values; I0 is least significant. */ static __inline __m64 _mm_set_pi32 (int __i1, int __i0) { - union { - __m64 __q; - struct { - unsigned int __i0; - unsigned int __i1; - } __s; - } __u; - - __u.__s.__i0 = __i0; - __u.__s.__i1 = __i1; - - return __u.__q; + return (__m64) __builtin_ia32_vec_init_v2si (__i0, __i1); } /* Creates a vector of four 16-bit values; W0 is least significant. */ static __inline __m64 _mm_set_pi16 (short __w3, short __w2, short __w1, short __w0) { - unsigned int __i1 = (unsigned short)__w3 << 16 | (unsigned short)__w2; - unsigned int __i0 = (unsigned short)__w1 << 16 | (unsigned short)__w0; - return _mm_set_pi32 (__i1, __i0); - + return (__m64) __builtin_ia32_vec_init_v4hi (__w0, __w1, __w2, __w3); } /* Creates a vector of eight 8-bit values; B0 is least significant. 
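The rewritten constructors let ix86_expand_vector_init pick the instruction sequence instead of hand-assembling words through a union on the stack. A usage sketch (function name hypothetical; assumes an -mmmx compilation):

   #include <mmintrin.h>

   __m64
   pair (int __hi, int __lo)
   {
     /* Expands through __builtin_ia32_vec_init_v2si, so the
        vec_init machinery can combine the halves in registers
        rather than bouncing through memory.  */
     return _mm_set_pi32 (__hi, __lo);
   }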
*/ @@ -854,19 +840,8 @@ static __inline __m64 _mm_set_pi8 (char __b7, char __b6, char __b5, char __b4, char __b3, char __b2, char __b1, char __b0) { - unsigned int __i1, __i0; - - __i1 = (unsigned char)__b7; - __i1 = __i1 << 8 | (unsigned char)__b6; - __i1 = __i1 << 8 | (unsigned char)__b5; - __i1 = __i1 << 8 | (unsigned char)__b4; - - __i0 = (unsigned char)__b3; - __i0 = __i0 << 8 | (unsigned char)__b2; - __i0 = __i0 << 8 | (unsigned char)__b1; - __i0 = __i0 << 8 | (unsigned char)__b0; - - return _mm_set_pi32 (__i1, __i0); + return (__m64) __builtin_ia32_vec_init_v8qi (__b0, __b1, __b2, __b3, + __b4, __b5, __b6, __b7); } /* Similar, but with the arguments in reverse order. */ @@ -900,17 +875,14 @@ _mm_set1_pi32 (int __i) static __inline __m64 _mm_set1_pi16 (short __w) { - unsigned int __i = (unsigned short)__w << 16 | (unsigned short)__w; - return _mm_set1_pi32 (__i); + return _mm_set_pi16 (__w, __w, __w, __w); } /* Creates a vector of eight 8-bit values, all elements containing B. */ static __inline __m64 _mm_set1_pi8 (char __b) { - unsigned int __w = (unsigned char)__b << 8 | (unsigned char)__b; - unsigned int __i = __w << 16 | __w; - return _mm_set1_pi32 (__i); + return _mm_set_pi8 (__b, __b, __b, __b, __b, __b, __b, __b); } #endif /* __MMX__ */ diff --git a/gcc/config/i386/mmx.md b/gcc/config/i386/mmx.md index b4d06e9..2f710a4 100644 --- a/gcc/config/i386/mmx.md +++ b/gcc/config/i386/mmx.md @@ -87,9 +87,9 @@ (define_insn "*mov_internal" [(set (match_operand:MMXMODEI 0 "nonimmediate_operand" - "=*y,*y ,m ,*y,*Y,*Y,*Y ,m ,*x,*x,*x,m") + "=*y,*y ,m ,*y,*Y,*Y,*Y ,m ,*x,*x,*x,m ,?r ,?m") (match_operand:MMXMODEI 1 "vector_move_operand" - "C ,*ym,*y,*Y,*y,C ,*Ym,*Y,C ,*x,m ,*x"))] + "C ,*ym,*y,*Y,*y,C ,*Ym,*Y,C ,*x,m ,*x,irm,r"))] "TARGET_MMX && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)" "@ @@ -104,9 +104,11 @@ xorps\t%0, %0 movaps\t{%1, %0|%0, %1} movlps\t{%1, %0|%0, %1} - movlps\t{%1, %0|%0, %1}" - [(set_attr "type" "mmxmov,mmxmov,mmxmov,ssecvt,ssecvt,ssemov,ssemov,ssemov,ssemov,ssemov,ssemov,ssemov") - (set_attr "mode" "DI,DI,DI,DI,DI,TI,DI,DI,V4SF,V4SF,V2SF,V2SF")]) + movlps\t{%1, %0|%0, %1} + # + #" + [(set_attr "type" "mmxmov,mmxmov,mmxmov,ssecvt,ssecvt,ssemov,ssemov,ssemov,ssemov,ssemov,ssemov,ssemov,*,*") + (set_attr "mode" "DI,DI,DI,DI,DI,TI,DI,DI,V4SF,V4SF,V2SF,V2SF,DI,DI")]) (define_expand "movv2sf" [(set (match_operand:V2SF 0 "nonimmediate_operand" "") @@ -142,9 +144,9 @@ (define_insn "*movv2sf_internal" [(set (match_operand:V2SF 0 "nonimmediate_operand" - "=*y,*y ,m,*y,*Y,*x,*x ,m") + "=*y,*y ,m,*y,*Y,*x,*x ,m ,?r ,?m") (match_operand:V2SF 1 "vector_move_operand" - "C ,*ym,*y,*Y,*y,C ,*xm,*x"))] + "C ,*ym,*y,*Y,*y,C ,*xm,*x,irm,r"))] "TARGET_MMX && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)" "@ @@ -155,9 +157,21 @@ movq2dq\t{%1, %0|%0, %1} xorps\t%0, %0 movlps\t{%1, %0|%0, %1} - movlps\t{%1, %0|%0, %1}" - [(set_attr "type" "mmxmov,mmxmov,mmxmov,ssecvt,ssecvt,ssemov,ssemov,ssemov") - (set_attr "mode" "DI,DI,DI,DI,DI,V4SF,V2SF,V2SF")]) + movlps\t{%1, %0|%0, %1} + # + #" + [(set_attr "type" "mmxmov,mmxmov,mmxmov,ssecvt,ssecvt,ssemov,ssemov,ssemov,*,*") + (set_attr "mode" "DI,DI,DI,DI,DI,V4SF,V2SF,V2SF,DI,DI")]) + +;; %%% This multiword shite has got to go. 
+(define_split + [(set (match_operand:MMXMODE 0 "nonimmediate_operand" "") + (match_operand:MMXMODE 1 "general_operand" ""))] + "!TARGET_64BIT && reload_completed + && (!MMX_REG_P (operands[0]) && !SSE_REG_P (operands[0])) + && (!MMX_REG_P (operands[1]) && !SSE_REG_P (operands[1]))" + [(const_int 0)] + "ix86_split_long_move (operands); DONE;") (define_expand "movmisalign" [(set (match_operand:MMXMODE 0 "nonimmediate_operand" "") @@ -426,6 +440,58 @@ [(set_attr "type" "mmxcvt") (set_attr "mode" "V2SF")]) +(define_insn "*vec_dupv2sf" + [(set (match_operand:V2SF 0 "register_operand" "=y") + (vec_duplicate:V2SF + (match_operand:SF 1 "register_operand" "0")))] + "TARGET_MMX" + "punpckldq\t%0, %0" + [(set_attr "type" "mmxcvt") + (set_attr "mode" "DI")]) + +(define_insn "*mmx_concatv2sf" + [(set (match_operand:V2SF 0 "register_operand" "=y,y") + (vec_concat:V2SF + (match_operand:SF 1 "nonimmediate_operand" " 0,rm") + (match_operand:SF 2 "vector_move_operand" "ym,C")))] + "TARGET_MMX && !TARGET_SSE" + "@ + punpckldq\t{%2, %0|%0, %2} + movd\t{%1, %0|%0, %1}" + [(set_attr "type" "mmxcvt,mmxmov") + (set_attr "mode" "DI")]) + +(define_expand "vec_setv2sf" + [(match_operand:V2SF 0 "register_operand" "") + (match_operand:SF 1 "register_operand" "") + (match_operand 2 "const_int_operand" "")] + "TARGET_MMX" +{ + ix86_expand_vector_set (false, operands[0], operands[1], + INTVAL (operands[2])); + DONE; +}) + +(define_expand "vec_extractv2sf" + [(match_operand:SF 0 "register_operand" "") + (match_operand:V2SF 1 "register_operand" "") + (match_operand 2 "const_int_operand" "")] + "TARGET_MMX" +{ + ix86_expand_vector_extract (false, operands[0], operands[1], + INTVAL (operands[2])); + DONE; +}) + +(define_expand "vec_initv2sf" + [(match_operand:V2SF 0 "register_operand" "") + (match_operand 1 "" "")] + "TARGET_SSE" +{ + ix86_expand_vector_init (false, operands[0], operands[1]); + DONE; +}) + ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; ;; Parallel integral arithmetic @@ -902,9 +968,9 @@ (define_expand "mmx_pinsrw" [(set (match_operand:V4HI 0 "register_operand" "") (vec_merge:V4HI - (match_operand:V4HI 1 "register_operand" "") (vec_duplicate:V4HI (match_operand:SI 2 "nonimmediate_operand" "")) + (match_operand:V4HI 1 "register_operand" "") (match_operand:SI 3 "const_0_to_3_operand" "")))] "TARGET_SSE || TARGET_3DNOW_A" { @@ -915,9 +981,9 @@ (define_insn "*mmx_pinsrw" [(set (match_operand:V4HI 0 "register_operand" "=y") (vec_merge:V4HI - (match_operand:V4HI 1 "register_operand" "0") (vec_duplicate:V4HI (match_operand:HI 2 "nonimmediate_operand" "rm")) + (match_operand:V4HI 1 "register_operand" "0") (match_operand:SI 3 "const_pow2_1_to_8_operand" "n")))] "TARGET_SSE || TARGET_3DNOW_A" { @@ -938,7 +1004,6 @@ [(set_attr "type" "mmxcvt") (set_attr "mode" "DI")]) - (define_expand "mmx_pshufw" [(match_operand:V4HI 0 "register_operand" "") (match_operand:V4HI 1 "nonimmediate_operand" "") @@ -986,6 +1051,130 @@ [(set_attr "type" "mmxcvt") (set_attr "mode" "DI")]) +(define_insn "*vec_dupv4hi" + [(set (match_operand:V4HI 0 "register_operand" "=y") + (vec_duplicate:V4HI + (truncate:HI + (match_operand:SI 1 "register_operand" "0"))))] + "TARGET_MMX" + "pshufw\t{$0, %0, %0|%0, %0, 0}" + [(set_attr "type" "mmxcvt") + (set_attr "mode" "DI")]) + +(define_insn "*vec_dupv2si" + [(set (match_operand:V2SI 0 "register_operand" "=y") + (vec_duplicate:V2SI + (match_operand:SI 1 "register_operand" "0")))] + "TARGET_MMX" + "punpckldq\t%0, %0" + [(set_attr "type" "mmxcvt") + (set_attr "mode" "DI")]) + 
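The *vec_dupv2si pattern above gives the all-equal case a one-instruction form. Through the builtin path, which passes mmx_ok as true, a splat such as _mm_set1_pi32 reduces to a movd plus the punpckldq matched above; a sketch (assuming _mm_set1_pi32 forwards to _mm_set_pi32 as in this header, and an -mmmx compilation):

   #include <mmintrin.h>

   __m64
   splat32 (int __x)
   {
     /* Identical elements: ix86_expand_vector_init_duplicate emits
        a VEC_DUPLICATE that *vec_dupv2si matches.  */
     return _mm_set1_pi32 (__x);
   }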
+(define_insn "*mmx_concatv2si" + [(set (match_operand:V2SI 0 "register_operand" "=y,y") + (vec_concat:V2SI + (match_operand:SI 1 "nonimmediate_operand" " 0,rm") + (match_operand:SI 2 "vector_move_operand" "ym,C")))] + "TARGET_MMX && !TARGET_SSE" + "@ + punpckldq\t{%2, %0|%0, %2} + movd\t{%1, %0|%0, %1}" + [(set_attr "type" "mmxcvt,mmxmov") + (set_attr "mode" "DI")]) + +(define_expand "vec_setv2si" + [(match_operand:V2SI 0 "register_operand" "") + (match_operand:SI 1 "register_operand" "") + (match_operand 2 "const_int_operand" "")] + "TARGET_MMX" +{ + ix86_expand_vector_set (false, operands[0], operands[1], + INTVAL (operands[2])); + DONE; +}) + +(define_expand "vec_extractv2si" + [(match_operand:SI 0 "register_operand" "") + (match_operand:V2SI 1 "register_operand" "") + (match_operand 2 "const_int_operand" "")] + "TARGET_MMX" +{ + ix86_expand_vector_extract (false, operands[0], operands[1], + INTVAL (operands[2])); + DONE; +}) + +(define_expand "vec_initv2si" + [(match_operand:V2SI 0 "register_operand" "") + (match_operand 1 "" "")] + "TARGET_SSE" +{ + ix86_expand_vector_init (false, operands[0], operands[1]); + DONE; +}) + +(define_expand "vec_setv4hi" + [(match_operand:V4HI 0 "register_operand" "") + (match_operand:HI 1 "register_operand" "") + (match_operand 2 "const_int_operand" "")] + "TARGET_MMX" +{ + ix86_expand_vector_set (false, operands[0], operands[1], + INTVAL (operands[2])); + DONE; +}) + +(define_expand "vec_extractv4hi" + [(match_operand:HI 0 "register_operand" "") + (match_operand:V4HI 1 "register_operand" "") + (match_operand 2 "const_int_operand" "")] + "TARGET_MMX" +{ + ix86_expand_vector_extract (false, operands[0], operands[1], + INTVAL (operands[2])); + DONE; +}) + +(define_expand "vec_initv4hi" + [(match_operand:V4HI 0 "register_operand" "") + (match_operand 1 "" "")] + "TARGET_SSE" +{ + ix86_expand_vector_init (false, operands[0], operands[1]); + DONE; +}) + +(define_expand "vec_setv8qi" + [(match_operand:V8QI 0 "register_operand" "") + (match_operand:QI 1 "register_operand" "") + (match_operand 2 "const_int_operand" "")] + "TARGET_MMX" +{ + ix86_expand_vector_set (false, operands[0], operands[1], + INTVAL (operands[2])); + DONE; +}) + +(define_expand "vec_extractv8qi" + [(match_operand:QI 0 "register_operand" "") + (match_operand:V8QI 1 "register_operand" "") + (match_operand 2 "const_int_operand" "")] + "TARGET_MMX" +{ + ix86_expand_vector_extract (false, operands[0], operands[1], + INTVAL (operands[2])); + DONE; +}) + +(define_expand "vec_initv8qi" + [(match_operand:V8QI 0 "register_operand" "") + (match_operand 1 "" "")] + "TARGET_SSE" +{ + ix86_expand_vector_init (false, operands[0], operands[1]); + DONE; +}) + ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; ;; Miscellaneous diff --git a/gcc/config/i386/pmmintrin.h b/gcc/config/i386/pmmintrin.h index 50db2bf..26c1f0f 100644 --- a/gcc/config/i386/pmmintrin.h +++ b/gcc/config/i386/pmmintrin.h @@ -95,13 +95,13 @@ _mm_hsub_pd (__m128d __X, __m128d __Y) static __inline __m128d _mm_loaddup_pd (double const *__P) { - return (__m128d) __builtin_ia32_loadddup (__P); + return _mm_load1_pd (__P); } static __inline __m128d _mm_movedup_pd (__m128d __X) { - return (__m128d) __builtin_ia32_movddup ((__v2df)__X); + return _mm_shuffle_pd (__X, __X, _MM_SHUFFLE2 (0,0)); } static __inline __m128i diff --git a/gcc/config/i386/predicates.md b/gcc/config/i386/predicates.md index fde85dd..73352b9 100644 --- a/gcc/config/i386/predicates.md +++ b/gcc/config/i386/predicates.md @@ -641,6 +641,11 @@ (ior 
(match_operand 0 "nonimmediate_operand") (match_operand 0 "const0_operand"))) +;; Return true if OP is a register or a zero. +(define_predicate "reg_or_0_operand" + (ior (match_operand 0 "register_operand") + (match_operand 0 "const0_operand"))) + ;; Return true if op if a valid address, and does not contain ;; a segment override. (define_special_predicate "no_seg_address_operand" diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md index 7821744..0f5ed7f 100644 --- a/gcc/config/i386/sse.md +++ b/gcc/config/i386/sse.md @@ -123,11 +123,14 @@ [(set (match_operand:V4SF 0 "register_operand" "") (match_operand:V4SF 1 "zero_extended_scalar_load_operand" ""))] "TARGET_SSE && reload_completed" - [(const_int 0)] + [(set (match_dup 0) + (vec_merge:V4SF + (vec_duplicate:V4SF (match_dup 1)) + (match_dup 2) + (const_int 1)))] { - rtx x = simplify_gen_subreg (SFmode, operands[1], V4SFmode, 0); - emit_insn (gen_sse_loadss (operands[0], x)); - DONE; + operands[1] = simplify_gen_subreg (SFmode, operands[1], V4SFmode, 0); + operands[2] = CONST0_RTX (V4SFmode); }) (define_expand "movv2df" @@ -185,11 +188,10 @@ [(set (match_operand:V2DF 0 "register_operand" "") (match_operand:V2DF 1 "zero_extended_scalar_load_operand" ""))] "TARGET_SSE2 && reload_completed" - [(const_int 0)] + [(set (match_dup 0) (vec_concat:V2DF (match_dup 1) (match_dup 2)))] { - rtx x = simplify_gen_subreg (DFmode, operands[1], V2DFmode, 0); - emit_insn (gen_sse2_loadsd (operands[0], x)); - DONE; + operands[1] = simplify_gen_subreg (DFmode, operands[1], V2DFmode, 0); + operands[2] = CONST0_RTX (DFmode); }) (define_expand "movmisalign" @@ -1041,16 +1043,63 @@ [(set_attr "type" "sselog,ssemov,ssemov") (set_attr "mode" "V4SF,V2SF,V2SF")]) -(define_expand "sse_loadss" - [(set (match_operand:V4SF 0 "nonimmediate_operand" "") +(define_insn "sse_movss" + [(set (match_operand:V4SF 0 "register_operand" "=x") (vec_merge:V4SF - (vec_duplicate:V4SF (match_operand:SF 1 "nonimmediate_operand" "")) - (match_dup 2) + (match_operand:V4SF 2 "register_operand" "x") + (match_operand:V4SF 1 "register_operand" "0") (const_int 1)))] "TARGET_SSE" - "operands[2] = CONST0_RTX (V4SFmode);") + "movss\t{%2, %0|%0, %2}" + [(set_attr "type" "ssemov") + (set_attr "mode" "SF")]) + +(define_insn "*vec_dupv4sf" + [(set (match_operand:V4SF 0 "register_operand" "=x") + (vec_duplicate:V4SF + (match_operand:SF 1 "register_operand" "0")))] + "TARGET_SSE" + "shufps\t{$0, %0, %0|%0, %0, 0}" + [(set_attr "type" "sselog1") + (set_attr "mode" "V4SF")]) + +;; ??? In theory we can match memory for the MMX alternative, but allowing +;; nonimmediate_operand for operand 2 and *not* allowing memory for the SSE +;; alternatives pretty much forces the MMX alternative to be chosen. 
+(define_insn "*sse_concatv2sf" + [(set (match_operand:V2SF 0 "register_operand" "=x,*y") + (vec_concat:V2SF + (match_operand:SF 1 "register_operand" " 0, 0") + (match_operand:SF 2 "register_operand" " x,*y")))] + "TARGET_SSE" + "@ + unpcklps\t{%2, %0|%0, %2} + punpckldq\t{%2, %0|%0, %2}" + [(set_attr "type" "sselog,mmxcvt") + (set_attr "mode" "V4SF,DI")]) + +(define_insn "*sse_concatv4sf" + [(set (match_operand:V4SF 0 "register_operand" "=x,x") + (vec_concat:V4SF + (match_operand:V2SF 1 "register_operand" " 0,0") + (match_operand:V2SF 2 "nonimmediate_operand" " x,m")))] + "TARGET_SSE" + "@ + movlhps\t{%2, %0|%0, %2} + movhps\t{%2, %0|%0, %2}" + [(set_attr "type" "ssemov") + (set_attr "mode" "V4SF,V2SF")]) + +(define_expand "vec_initv4sf" + [(match_operand:V4SF 0 "register_operand" "") + (match_operand 1 "" "")] + "TARGET_SSE" +{ + ix86_expand_vector_init (false, operands[0], operands[1]); + DONE; +}) -(define_insn "sse_loadlss" +(define_insn "*vec_setv4sf_0" [(set (match_operand:V4SF 0 "nonimmediate_operand" "=x,x,Y ,m") (vec_merge:V4SF (vec_duplicate:V4SF @@ -1080,18 +1129,18 @@ DONE; }) -(define_insn "sse_movss" - [(set (match_operand:V4SF 0 "register_operand" "=x") - (vec_merge:V4SF - (match_operand:V4SF 2 "register_operand" "x") - (match_operand:V4SF 1 "register_operand" "0") - (const_int 1)))] +(define_expand "vec_setv4sf" + [(match_operand:V4SF 0 "register_operand" "") + (match_operand:SF 1 "register_operand" "") + (match_operand 2 "const_int_operand" "")] "TARGET_SSE" - "movss\t{%2, %0|%0, %2}" - [(set_attr "type" "ssemov") - (set_attr "mode" "SF")]) +{ + ix86_expand_vector_set (false, operands[0], operands[1], + INTVAL (operands[2])); + DONE; +}) -(define_insn_and_split "sse_storess" +(define_insn_and_split "*vec_extractv4sf_0" [(set (match_operand:SF 0 "nonimmediate_operand" "=x,m,fr") (vec_select:SF (match_operand:V4SF 1 "nonimmediate_operand" "xm,x,m") @@ -1101,56 +1150,12 @@ "&& reload_completed" [(const_int 0)] { - emit_move_insn (operands[0], gen_lowpart (SFmode, operands[1])); - DONE; -}) - -(define_expand "vec_setv4sf" - [(match_operand:V4SF 0 "register_operand" "") - (match_operand:SF 1 "register_operand" "") - (match_operand 2 "const_int_operand" "")] - "TARGET_SSE" -{ - rtx tmp, op0 = operands[0], op1 = operands[1]; - - switch (INTVAL (operands[2])) - { - case 0: - emit_insn (gen_sse_loadlss (op0, op0, op1)); - break; - - case 1: - /* tmp = op0 = A B C D */ - tmp = copy_to_reg (op0); - - /* op0 = C C D D */ - emit_insn (gen_sse_unpcklps (op0, op0, op0)); - - /* op0 = C C D X */ - emit_insn (gen_sse_loadlss (op0, op0, op1)); - - /* op0 = A B X D */ - emit_insn (gen_sse_shufps_1 (op0, op0, tmp, GEN_INT (1), GEN_INT (0), - GEN_INT (2), GEN_INT (3))); - break; - - case 2: - tmp = copy_to_reg (op0); - emit_insn (gen_sse_loadlss (op0, op0, op1)); - emit_insn (gen_sse_shufps_1 (op0, op0, tmp, GEN_INT (0), GEN_INT (1), - GEN_INT (0), GEN_INT (3))); - break; - - case 3: - tmp = copy_to_reg (op0); - emit_insn (gen_sse_loadlss (op0, op0, op1)); - emit_insn (gen_sse_shufps_1 (op0, op0, tmp, GEN_INT (0), GEN_INT (1), - GEN_INT (2), GEN_INT (0))); - break; - - default: - abort (); - } + rtx op1 = operands[1]; + if (REG_P (op1)) + op1 = gen_rtx_REG (SFmode, REGNO (op1)); + else + op1 = gen_lowpart (SFmode, op1); + emit_move_insn (operands[0], op1); DONE; }) @@ -1160,49 +1165,11 @@ (match_operand 2 "const_int_operand" "")] "TARGET_SSE" { - rtx tmp, op0 = operands[0], op1 = operands[1]; - - switch (INTVAL (operands[2])) - { - case 0: - tmp = op1; - break; - - case 1: - tmp = copy_to_reg 
(op1); - emit_insn (gen_sse_shufps_1 (tmp, tmp, tmp, GEN_INT (1), GEN_INT (1), - GEN_INT (2), GEN_INT (3))); - break; - - case 2: - tmp = copy_to_reg (op1); - emit_insn (gen_sse_unpckhps (tmp, tmp, tmp)); - break; - - case 3: - tmp = copy_to_reg (op1); - emit_insn (gen_sse_shufps_1 (tmp, tmp, tmp, GEN_INT (3), GEN_INT (1), - GEN_INT (2), GEN_INT (3))); - break; - - default: - abort (); - } - - emit_insn (gen_sse_storess (op0, op1)); + ix86_expand_vector_extract (false, operands[0], operands[1], + INTVAL (operands[2])); DONE; }) -(define_expand "vec_initv4sf" - [(match_operand:V4SF 0 "register_operand" "") - (match_operand 1 "" "")] - "TARGET_SSE" -{ - ix86_expand_vector_init (operands[0], operands[1]); - DONE; -}) - - ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; ;; Parallel double-precision floating point arithmetic @@ -1814,7 +1781,7 @@ [(set_attr "type" "sselog,ssemov,ssemov") (set_attr "mode" "V2DF,V1DF,V1DF")]) -(define_insn "sse3_movddup" +(define_insn "*sse3_movddup" [(set (match_operand:V2DF 0 "nonimmediate_operand" "=x,o") (vec_select:V2DF (vec_concat:V4DF @@ -1840,7 +1807,7 @@ "TARGET_SSE3 && reload_completed" [(const_int 0)] { - rtx low = gen_lowpart (DFmode, operands[1]); + rtx low = gen_rtx_REG (DFmode, REGNO (operands[1])); emit_move_insn (adjust_address (operands[0], DFmode, 0), low); emit_move_insn (adjust_address (operands[0], DFmode, 8), low); DONE; @@ -1939,10 +1906,15 @@ (match_operand:V2DF 1 "nonimmediate_operand" "") (parallel [(const_int 0)])))] "TARGET_SSE2 && reload_completed" - [(set (match_dup 0) (match_dup 1))] + [(const_int 0)] { - operands[0] = gen_lowpart (DFmode, operands[0]); - operands[1] = gen_lowpart (DFmode, operands[1]); + rtx op1 = operands[1]; + if (REG_P (op1)) + op1 = gen_rtx_REG (DFmode, REGNO (op1)); + else + op1 = gen_lowpart (DFmode, op1); + emit_move_insn (operands[0], op1); + DONE; }) (define_insn "sse2_loadhpd" @@ -2001,14 +1973,6 @@ operands[0] = adjust_address (operands[0], DFmode, 8); }) -(define_expand "sse2_loadsd" - [(set (match_operand:V2DF 0 "register_operand" "") - (vec_concat:V2DF - (match_operand:DF 1 "nonimmediate_operand" "") - (vec_select:DF (match_dup 2) (parallel [(const_int 1)]))))] - "TARGET_SSE2" - "operands[2] = CONST0_RTX (V2DFmode);") - (define_insn "sse2_movsd" [(set (match_operand:V2DF 0 "nonimmediate_operand" "=x,x,m,x,x,o") (vec_merge:V2DF @@ -2026,32 +1990,57 @@ [(set_attr "type" "ssemov,ssemov,ssemov,sselog,ssemov,ssemov") (set_attr "mode" "DF,V1DF,V1DF,V2DF,V1DF,V1DF")]) -(define_insn "sse3_loadddup" +(define_insn "*vec_dupv2df_sse3" [(set (match_operand:V2DF 0 "register_operand" "=x") (vec_duplicate:V2DF (match_operand:DF 1 "nonimmediate_operand" "xm")))] "TARGET_SSE3" "movddup\t{%1, %0|%0, %1}" - [(set_attr "type" "ssecvt") + [(set_attr "type" "sselog1") + (set_attr "mode" "DF")]) + +(define_insn "*vec_dupv2df" + [(set (match_operand:V2DF 0 "register_operand" "=x") + (vec_duplicate:V2DF + (match_operand:DF 1 "register_operand" "0")))] + "TARGET_SSE2" + "unpcklpd\t%0, %0" + [(set_attr "type" "sselog1") + (set_attr "mode" "V4SF")]) + +(define_insn "*vec_concatv2df_sse3" + [(set (match_operand:V2DF 0 "register_operand" "=x") + (vec_concat:V2DF + (match_operand:DF 1 "nonimmediate_operand" "xm") + (match_dup 1)))] + "TARGET_SSE3" + "movddup\t{%1, %0|%0, %1}" + [(set_attr "type" "sselog1") (set_attr "mode" "DF")]) +(define_insn "*vec_concatv2df" + [(set (match_operand:V2DF 0 "register_operand" "=Y,Y,Y,x,x") + (vec_concat:V2DF + (match_operand:DF 1 "nonimmediate_operand" " 0,0,m,0,0") + 
(match_operand:DF 2 "vector_move_operand" " Y,m,C,x,m")))] + "TARGET_SSE" + "@ + unpcklpd\t{%2, %0|%0, %2} + movhpd\t{%2, %0|%0, %2} + movsd\t{%1, %0|%0, %1} + movlhps\t{%2, %0|%0, %2} + movhps\t{%2, %0|%0, %2}" + [(set_attr "type" "sselog,ssemov,ssemov,ssemov,ssemov") + (set_attr "mode" "V2DF,V1DF,DF,V4SF,V2SF")]) + (define_expand "vec_setv2df" [(match_operand:V2DF 0 "register_operand" "") (match_operand:DF 1 "register_operand" "") (match_operand 2 "const_int_operand" "")] - "TARGET_SSE2" + "TARGET_SSE" { - switch (INTVAL (operands[2])) - { - case 0: - emit_insn (gen_sse2_loadlpd (operands[0], operands[0], operands[1])); - break; - case 1: - emit_insn (gen_sse2_loadhpd (operands[0], operands[0], operands[1])); - break; - default: - abort (); - } + ix86_expand_vector_set (false, operands[0], operands[1], + INTVAL (operands[2])); DONE; }) @@ -2059,28 +2048,19 @@ [(match_operand:DF 0 "register_operand" "") (match_operand:V2DF 1 "register_operand" "") (match_operand 2 "const_int_operand" "")] - "TARGET_SSE2" + "TARGET_SSE" { - switch (INTVAL (operands[2])) - { - case 0: - emit_insn (gen_sse2_storelpd (operands[0], operands[1])); - break; - case 1: - emit_insn (gen_sse2_storehpd (operands[0], operands[1])); - break; - default: - abort (); - } + ix86_expand_vector_extract (false, operands[0], operands[1], + INTVAL (operands[2])); DONE; }) (define_expand "vec_initv2df" [(match_operand:V2DF 0 "register_operand" "") (match_operand 1 "" "")] - "TARGET_SSE2" + "TARGET_SSE" { - ix86_expand_vector_init (operands[0], operands[1]); + ix86_expand_vector_init (false, operands[0], operands[1]); DONE; }) @@ -2661,9 +2641,9 @@ (define_expand "sse2_pinsrw" [(set (match_operand:V8HI 0 "register_operand" "") (vec_merge:V8HI - (match_operand:V8HI 1 "register_operand" "") (vec_duplicate:V8HI (match_operand:SI 2 "nonimmediate_operand" "")) + (match_operand:V8HI 1 "register_operand" "") (match_operand:SI 3 "const_0_to_7_operand" "")))] "TARGET_SSE2" { @@ -2674,9 +2654,9 @@ (define_insn "*sse2_pinsrw" [(set (match_operand:V8HI 0 "register_operand" "=x") (vec_merge:V8HI - (match_operand:V8HI 1 "register_operand" "0") (vec_duplicate:V8HI (match_operand:HI 2 "nonimmediate_operand" "rm")) + (match_operand:V8HI 1 "register_operand" "0") (match_operand:SI 3 "const_pow2_1_to_128_operand" "n")))] "TARGET_SSE2" { @@ -2823,58 +2803,55 @@ (match_operand:SI 1 "nonimmediate_operand" "")) (match_dup 2) (const_int 1)))] - "TARGET_SSE2" + "TARGET_SSE" "operands[2] = CONST0_RTX (V4SImode);") (define_insn "sse2_loadld" - [(set (match_operand:V4SI 0 "register_operand" "=x,x") + [(set (match_operand:V4SI 0 "register_operand" "=Y,x,x") (vec_merge:V4SI (vec_duplicate:V4SI - (match_operand:SI 2 "nonimmediate_operand" "mr,x")) - (match_operand:V4SI 1 "vector_move_operand" "C,0") + (match_operand:SI 2 "nonimmediate_operand" "mr,m,x")) + (match_operand:V4SI 1 "vector_move_operand" " C,C,0") (const_int 1)))] - "TARGET_SSE2" + "TARGET_SSE" "@ movd\t{%2, %0|%0, %2} + movss\t{%2, %0|%0, %2} movss\t{%2, %0|%0, %2}" [(set_attr "type" "ssemov") - (set_attr "mode" "TI")]) + (set_attr "mode" "TI,V4SF,SF")]) +;; ??? The hardware supports more, but TARGET_INTER_UNIT_MOVES must +;; be taken into account, and movdi isn't fully populated even without. 
(define_insn_and_split "sse2_stored" - [(set (match_operand:SI 0 "nonimmediate_operand" "=mrx") + [(set (match_operand:SI 0 "nonimmediate_operand" "=mx") (vec_select:SI (match_operand:V4SI 1 "register_operand" "x") (parallel [(const_int 0)])))] - "TARGET_SSE2" + "TARGET_SSE" "#" "&& reload_completed" [(set (match_dup 0) (match_dup 1))] { - operands[1] = gen_lowpart (SImode, operands[1]); + operands[1] = gen_rtx_REG (SImode, REGNO (operands[1])); }) -(define_expand "sse2_storeq" +(define_expand "sse_storeq" [(set (match_operand:DI 0 "nonimmediate_operand" "") (vec_select:DI (match_operand:V2DI 1 "register_operand" "") (parallel [(const_int 0)])))] - "TARGET_SSE2" + "TARGET_SSE" "") +;; ??? The hardware supports more, but TARGET_INTER_UNIT_MOVES must +;; be taken into account, and movdi isn't fully populated even without. (define_insn "*sse2_storeq" - [(set (match_operand:DI 0 "nonimmediate_operand" "=myx") - (vec_select:DI - (match_operand:V2DI 1 "register_operand" "x") - (parallel [(const_int 0)])))] - "TARGET_SSE2 && !TARGET_64BIT" - "#") - -(define_insn "*sse2_storeq_rex64" - [(set (match_operand:DI 0 "nonimmediate_operand" "=myxr") + [(set (match_operand:DI 0 "nonimmediate_operand" "=mx") (vec_select:DI (match_operand:V2DI 1 "register_operand" "x") (parallel [(const_int 0)])))] - "TARGET_SSE2 && TARGET_64BIT" + "TARGET_SSE" "#") (define_split @@ -2882,10 +2859,10 @@ (vec_select:DI (match_operand:V2DI 1 "register_operand" "") (parallel [(const_int 0)])))] - "TARGET_SSE2 && reload_completed" + "TARGET_SSE && reload_completed" [(set (match_dup 0) (match_dup 1))] { - operands[1] = gen_lowpart (DImode, operands[1]); + operands[1] = gen_rtx_REG (DImode, REGNO (operands[1])); }) (define_expand "sse2_loadq" @@ -2895,29 +2872,30 @@ (match_operand:DI 1 "nonimmediate_operand" "")) (match_dup 2) (const_int 1)))] - "TARGET_SSE2" + "TARGET_SSE" "operands[2] = CONST0_RTX (V2DImode);") (define_insn "*sse2_loadq" - [(set (match_operand:V2DI 0 "register_operand" "=x,?x,x") + [(set (match_operand:V2DI 0 "register_operand" "=Y,?Y,Y,x") (vec_merge:V2DI (vec_duplicate:V2DI - (match_operand:DI 1 "nonimmediate_operand" " m, y,x")) - (match_operand:V2DI 2 "vector_move_operand" " C, C,0") + (match_operand:DI 1 "nonimmediate_operand" " m,*y,Y,0")) + (match_operand:V2DI 2 "vector_move_operand" " C, C,0,x") (const_int 1)))] - "TARGET_SSE2 && !TARGET_64BIT" + "TARGET_SSE && !TARGET_64BIT" "@ movq\t{%1, %0|%0, %1} movq2dq\t{%1, %0|%0, %1} - movq\t{%1, %0|%0, %1}" - [(set_attr "type" "ssemov") - (set_attr "mode" "TI")]) + movq\t{%1, %0|%0, %1} + shufps\t{$0xe4, %1, %0|%0, %1, 0xe4}" + [(set_attr "type" "ssemov,ssemov,ssemov,sselog") + (set_attr "mode" "TI,TI,TI,V4SF")]) (define_insn "*sse2_loadq_rex64" [(set (match_operand:V2DI 0 "register_operand" "=x,?x,?x,x") (vec_merge:V2DI (vec_duplicate:V2DI - (match_operand:DI 1 "nonimmediate_operand" " m, y, r,x")) + (match_operand:DI 1 "nonimmediate_operand" " m,*y, r,x")) (match_operand:V2DI 2 "vector_move_operand" " C, C, C,0") (const_int 1)))] "TARGET_SSE2 && TARGET_64BIT" @@ -2929,6 +2907,212 @@ [(set_attr "type" "ssemov") (set_attr "mode" "TI")]) +(define_insn "*vec_dupv4si" + [(set (match_operand:V4SI 0 "register_operand" "=Y,x") + (vec_duplicate:V4SI + (match_operand:SI 1 "register_operand" " Y,0")))] + "TARGET_SSE" + "@ + pshufd\t{$0, %1, %0|%0, %1, 0} + shufps\t{$0, %0, %0|%0, %0, 0}" + [(set_attr "type" "sselog1") + (set_attr "mode" "TI,V4SF")]) + +(define_insn "*vec_dupv2di" + [(set (match_operand:V2DI 0 "register_operand" "=Y,x") + (vec_duplicate:V2DI + 
(match_operand:DI 1 "register_operand" " 0,0")))] + "TARGET_SSE" + "@ + punpcklqdq\t%0, %0 + movlhps\t%0, %0" + [(set_attr "type" "sselog1,ssemov") + (set_attr "mode" "TI,V4SF")]) + +;; ??? In theory we can match memory for the MMX alternative, but allowing +;; nonimmediate_operand for operand 2 and *not* allowing memory for the SSE +;; alternatives pretty much forces the MMX alternative to be chosen. +(define_insn "*sse2_concatv2si" + [(set (match_operand:V2SI 0 "register_operand" "=Y, Y,*y,*y") + (vec_concat:V2SI + (match_operand:SI 1 "nonimmediate_operand" " 0,rm, 0,rm") + (match_operand:SI 2 "reg_or_0_operand" " Y, C,*y, C")))] + "TARGET_SSE2" + "@ + punpckldq\t{%2, %0|%0, %2} + movd\t{%1, %0|%0, %1} + punpckldq\t{%2, %0|%0, %2} + movd\t{%1, %0|%0, %1}" + [(set_attr "type" "sselog,ssemov,mmxcvt,mmxmov") + (set_attr "mode" "TI,TI,DI,DI")]) + +(define_insn "*sse1_concatv2si" + [(set (match_operand:V2SI 0 "register_operand" "=x,x,*y,*y") + (vec_concat:V2SI + (match_operand:SI 1 "nonimmediate_operand" " 0,m, 0,*rm") + (match_operand:SI 2 "reg_or_0_operand" " x,C,*y,C")))] + "TARGET_SSE" + "@ + unpcklps\t{%2, %0|%0, %2} + movss\t{%1, %0|%0, %1} + punpckldq\t{%2, %0|%0, %2} + movd\t{%1, %0|%0, %1}" + [(set_attr "type" "sselog,ssemov,mmxcvt,mmxmov") + (set_attr "mode" "V4SF,V4SF,DI,DI")]) + +(define_insn "*vec_concatv4si_1" + [(set (match_operand:V4SI 0 "register_operand" "=Y,x,x") + (vec_concat:V4SI + (match_operand:V2SI 1 "register_operand" " 0,0,0") + (match_operand:V2SI 2 "nonimmediate_operand" " Y,x,m")))] + "TARGET_SSE" + "@ + punpcklqdq\t{%2, %0|%0, %2} + movlhps\t{%2, %0|%0, %2} + movhps\t{%2, %0|%0, %2}" + [(set_attr "type" "sselog,ssemov,ssemov") + (set_attr "mode" "TI,V4SF,V2SF")]) + +(define_insn "*vec_concatv2di" + [(set (match_operand:V2DI 0 "register_operand" "=Y,?Y,Y,x,x,x") + (vec_concat:V2DI + (match_operand:DI 1 "nonimmediate_operand" " m,*y,0,0,0,m") + (match_operand:DI 2 "vector_move_operand" " C, C,Y,x,m,0")))] + "TARGET_SSE" + "@ + movq\t{%1, %0|%0, %1} + movq2dq\t{%1, %0|%0, %1} + punpcklqdq\t{%2, %0|%0, %2} + movlhps\t{%2, %0|%0, %2} + movhps\t{%2, %0|%0, %2} + movlps\t{%1, %0|%0, %1}" + [(set_attr "type" "ssemov,ssemov,sselog,ssemov,ssemov,ssemov") + (set_attr "mode" "TI,TI,TI,V4SF,V2SF,V2SF")]) + +(define_expand "vec_setv2di" + [(match_operand:V2DI 0 "register_operand" "") + (match_operand:DI 1 "register_operand" "") + (match_operand 2 "const_int_operand" "")] + "TARGET_SSE" +{ + ix86_expand_vector_set (false, operands[0], operands[1], + INTVAL (operands[2])); + DONE; +}) + +(define_expand "vec_extractv2di" + [(match_operand:DI 0 "register_operand" "") + (match_operand:V2DI 1 "register_operand" "") + (match_operand 2 "const_int_operand" "")] + "TARGET_SSE" +{ + ix86_expand_vector_extract (false, operands[0], operands[1], + INTVAL (operands[2])); + DONE; +}) + +(define_expand "vec_initv2di" + [(match_operand:V2DI 0 "register_operand" "") + (match_operand 1 "" "")] + "TARGET_SSE" +{ + ix86_expand_vector_init (false, operands[0], operands[1]); + DONE; +}) + +(define_expand "vec_setv4si" + [(match_operand:V4SI 0 "register_operand" "") + (match_operand:SI 1 "register_operand" "") + (match_operand 2 "const_int_operand" "")] + "TARGET_SSE" +{ + ix86_expand_vector_set (false, operands[0], operands[1], + INTVAL (operands[2])); + DONE; +}) + +(define_expand "vec_extractv4si" + [(match_operand:SI 0 "register_operand" "") + (match_operand:V4SI 1 "register_operand" "") + (match_operand 2 "const_int_operand" "")] + "TARGET_SSE" +{ + ix86_expand_vector_extract (false, operands[0], 
operands[1], + INTVAL (operands[2])); + DONE; +}) + +(define_expand "vec_initv4si" + [(match_operand:V4SI 0 "register_operand" "") + (match_operand 1 "" "")] + "TARGET_SSE" +{ + ix86_expand_vector_init (false, operands[0], operands[1]); + DONE; +}) + +(define_expand "vec_setv8hi" + [(match_operand:V8HI 0 "register_operand" "") + (match_operand:HI 1 "register_operand" "") + (match_operand 2 "const_int_operand" "")] + "TARGET_SSE" +{ + ix86_expand_vector_set (false, operands[0], operands[1], + INTVAL (operands[2])); + DONE; +}) + +(define_expand "vec_extractv8hi" + [(match_operand:HI 0 "register_operand" "") + (match_operand:V8HI 1 "register_operand" "") + (match_operand 2 "const_int_operand" "")] + "TARGET_SSE" +{ + ix86_expand_vector_extract (false, operands[0], operands[1], + INTVAL (operands[2])); + DONE; +}) + +(define_expand "vec_initv8hi" + [(match_operand:V8HI 0 "register_operand" "") + (match_operand 1 "" "")] + "TARGET_SSE" +{ + ix86_expand_vector_init (false, operands[0], operands[1]); + DONE; +}) + +(define_expand "vec_setv16qi" + [(match_operand:V16QI 0 "register_operand" "") + (match_operand:QI 1 "register_operand" "") + (match_operand 2 "const_int_operand" "")] + "TARGET_SSE" +{ + ix86_expand_vector_set (false, operands[0], operands[1], + INTVAL (operands[2])); + DONE; +}) + +(define_expand "vec_extractv16qi" + [(match_operand:QI 0 "register_operand" "") + (match_operand:V16QI 1 "register_operand" "") + (match_operand 2 "const_int_operand" "")] + "TARGET_SSE" +{ + ix86_expand_vector_extract (false, operands[0], operands[1], + INTVAL (operands[2])); + DONE; +}) + +(define_expand "vec_initv16qi" + [(match_operand:V16QI 0 "register_operand" "") + (match_operand 1 "" "")] + "TARGET_SSE" +{ + ix86_expand_vector_init (false, operands[0], operands[1]); + DONE; +}) + ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; ;; Miscelaneous diff --git a/gcc/config/i386/xmmintrin.h b/gcc/config/i386/xmmintrin.h index 6ef302e..6c56973 100644 --- a/gcc/config/i386/xmmintrin.h +++ b/gcc/config/i386/xmmintrin.h @@ -86,6 +86,13 @@ enum _mm_hint #define _MM_FLUSH_ZERO_ON 0x8000 #define _MM_FLUSH_ZERO_OFF 0x0000 +/* Create a vector of zeros. */ +static __inline __m128 +_mm_setzero_ps (void) +{ + return (__m128){ 0.0f, 0.0f, 0.0f, 0.0f }; +} + /* Perform the respective operation on the lower SPFP (single-precision floating-point) values of A and B; the upper three SPFP values are passed through from A. */ @@ -590,15 +597,14 @@ _mm_cvtpi16_ps (__m64 __A) /* This comparison against zero gives us a mask that can be used to fill in the missing sign bits in the unpack operations below, so that we get signed values after unpacking. */ - __sign = (__v4hi) __builtin_ia32_mmx_zero (); - __sign = __builtin_ia32_pcmpgtw (__sign, (__v4hi)__A); + __sign = __builtin_ia32_pcmpgtw ((__v4hi)0LL, (__v4hi)__A); /* Convert the four words to doublewords. */ __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, __sign); __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, __sign); /* Convert the doublewords to floating point two at a time. */ - __r = (__v4sf) __builtin_ia32_setzerops (); + __r = (__v4sf) _mm_setzero_ps (); __r = __builtin_ia32_cvtpi2ps (__r, __hisi); __r = __builtin_ia32_movlhps (__r, __r); __r = __builtin_ia32_cvtpi2ps (__r, __losi); @@ -610,16 +616,15 @@ _mm_cvtpi16_ps (__m64 __A) static __inline __m128 _mm_cvtpu16_ps (__m64 __A) { - __v4hi __zero = (__v4hi) __builtin_ia32_mmx_zero (); __v2si __hisi, __losi; __v4sf __r; /* Convert the four words to doublewords. 
*/ - __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, __zero); - __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, __zero); + __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, (__v4hi)0LL); + __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, (__v4hi)0LL); /* Convert the doublewords to floating point two at a time. */ - __r = (__v4sf) __builtin_ia32_setzerops (); + __r = (__v4sf) _mm_setzero_ps (); __r = __builtin_ia32_cvtpi2ps (__r, __hisi); __r = __builtin_ia32_movlhps (__r, __r); __r = __builtin_ia32_cvtpi2ps (__r, __losi); @@ -636,8 +641,7 @@ _mm_cvtpi8_ps (__m64 __A) /* This comparison against zero gives us a mask that can be used to fill in the missing sign bits in the unpack operations below, so that we get signed values after unpacking. */ - __sign = (__v8qi) __builtin_ia32_mmx_zero (); - __sign = __builtin_ia32_pcmpgtb (__sign, (__v8qi)__A); + __sign = __builtin_ia32_pcmpgtb ((__v8qi)0LL, (__v8qi)__A); /* Convert the four low bytes to words. */ __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, __sign); @@ -649,8 +653,7 @@ _mm_cvtpi8_ps (__m64 __A) static __inline __m128 _mm_cvtpu8_ps(__m64 __A) { - __v8qi __zero = (__v8qi) __builtin_ia32_mmx_zero (); - __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, __zero); + __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, (__v8qi)0LL); return _mm_cvtpu16_ps(__A); } @@ -658,7 +661,7 @@ _mm_cvtpu8_ps(__m64 __A) static __inline __m128 _mm_cvtpi32x2_ps(__m64 __A, __m64 __B) { - __v4sf __zero = (__v4sf) __builtin_ia32_setzerops (); + __v4sf __zero = (__v4sf) _mm_setzero_ps (); __v4sf __sfa = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__A); __v4sf __sfb = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__B); return (__m128) __builtin_ia32_movlhps (__sfa, __sfb); @@ -680,8 +683,7 @@ static __inline __m64 _mm_cvtps_pi8(__m128 __A) { __v4hi __tmp = (__v4hi) _mm_cvtps_pi16 (__A); - __v4hi __zero = (__v4hi) __builtin_ia32_mmx_zero (); - return (__m64) __builtin_ia32_packsswb (__tmp, __zero); + return (__m64) __builtin_ia32_packsswb (__tmp, (__v4hi)0LL); } /* Selects four specific SPFP values from A and B based on MASK. */ @@ -826,19 +828,38 @@ _MM_SET_FLUSH_ZERO_MODE (unsigned int __mode) _mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | __mode); } +/* Create a vector with element 0 as F and the rest zero. */ +static __inline __m128 +_mm_set_ss (float __F) +{ + return (__m128)(__v4sf){ __F, 0, 0, 0 }; +} + +/* Create a vector with all four elements equal to F. */ +static __inline __m128 +_mm_set1_ps (float __F) +{ + return (__m128)(__v4sf){ __F, __F, __F, __F }; +} + +static __inline __m128 +_mm_set_ps1 (float __F) +{ + return _mm_set1_ps (__F); +} + /* Create a vector with element 0 as *P and the rest zero. */ static __inline __m128 _mm_load_ss (float const *__P) { - return (__m128) __builtin_ia32_loadss (__P); + return _mm_set_ss (*__P); } /* Create a vector with all four elements equal to *P. */ static __inline __m128 _mm_load1_ps (float const *__P) { - __v4sf __tmp = __builtin_ia32_loadss (__P); - return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,0,0,0)); + return _mm_set1_ps (*__P); } static __inline __m128 @@ -851,7 +872,7 @@ _mm_load_ps1 (float const *__P) static __inline __m128 _mm_load_ps (float const *__P) { - return (__m128) __builtin_ia32_loadaps (__P); + return (__m128) *(__v4sf *)__P; } /* Load four SPFP values from P. The address need not be 16-byte aligned. 
*/ @@ -865,86 +886,58 @@ _mm_loadu_ps (float const *__P) static __inline __m128 _mm_loadr_ps (float const *__P) { - __v4sf __tmp = __builtin_ia32_loadaps (__P); + __v4sf __tmp = *(__v4sf *)__P; return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,1,2,3)); } -/* Create a vector with element 0 as F and the rest zero. */ -static __inline __m128 -_mm_set_ss (float __F) -{ - return (__m128) __builtin_ia32_loadss (&__F); -} - -/* Create a vector with all four elements equal to F. */ -static __inline __m128 -_mm_set1_ps (float __F) -{ - __v4sf __tmp = __builtin_ia32_loadss (&__F); - return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,0,0,0)); -} - -static __inline __m128 -_mm_set_ps1 (float __F) -{ - return _mm_set1_ps (__F); -} - /* Create the vector [Z Y X W]. */ static __inline __m128 _mm_set_ps (const float __Z, const float __Y, const float __X, const float __W) { - return (__v4sf) {__W, __X, __Y, __Z}; + return (__m128)(__v4sf){ __W, __X, __Y, __Z }; } /* Create the vector [W X Y Z]. */ static __inline __m128 _mm_setr_ps (float __Z, float __Y, float __X, float __W) { - return _mm_set_ps (__W, __X, __Y, __Z); -} - -/* Create a vector of zeros. */ -static __inline __m128 -_mm_setzero_ps (void) -{ - return (__m128) __builtin_ia32_setzerops (); + return (__m128)(__v4sf){ __Z, __Y, __X, __W }; } /* Stores the lower SPFP value. */ static __inline void _mm_store_ss (float *__P, __m128 __A) { - __builtin_ia32_storess (__P, (__v4sf)__A); + *__P = __builtin_ia32_vec_ext_v4sf ((__v4sf)__A, 0); } -/* Store the lower SPFP value across four words. */ +/* Store four SPFP values. The address must be 16-byte aligned. */ static __inline void -_mm_store1_ps (float *__P, __m128 __A) +_mm_store_ps (float *__P, __m128 __A) { - __v4sf __va = (__v4sf)__A; - __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,0,0,0)); - __builtin_ia32_storeaps (__P, __tmp); + *(__v4sf *)__P = (__v4sf)__A; } +/* Store four SPFP values. The address need not be 16-byte aligned. */ static __inline void -_mm_store_ps1 (float *__P, __m128 __A) +_mm_storeu_ps (float *__P, __m128 __A) { - _mm_store1_ps (__P, __A); + __builtin_ia32_storeups (__P, (__v4sf)__A); } -/* Store four SPFP values. The address must be 16-byte aligned. */ +/* Store the lower SPFP value across four words. */ static __inline void -_mm_store_ps (float *__P, __m128 __A) +_mm_store1_ps (float *__P, __m128 __A) { - __builtin_ia32_storeaps (__P, (__v4sf)__A); + __v4sf __va = (__v4sf)__A; + __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,0,0,0)); + _mm_storeu_ps (__P, __tmp); } -/* Store four SPFP values. The address need not be 16-byte aligned. */ static __inline void -_mm_storeu_ps (float *__P, __m128 __A) +_mm_store_ps1 (float *__P, __m128 __A) { - __builtin_ia32_storeups (__P, (__v4sf)__A); + _mm_store1_ps (__P, __A); } /* Store four SPFP values in reverse order. The address must be aligned. */ @@ -953,7 +946,7 @@ _mm_storer_ps (float *__P, __m128 __A) { __v4sf __va = (__v4sf)__A; __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,1,2,3)); - __builtin_ia32_storeaps (__P, __tmp); + _mm_store_ps (__P, __tmp); } /* Sets the low SPFP value of A from the low value of B. */ @@ -965,40 +958,39 @@ _mm_move_ss (__m128 __A, __m128 __B) /* Extracts one of the four words of A. The selector N must be immediate. 
*/ #if 0 -static __inline int -_mm_extract_pi16 (__m64 __A, int __N) +static __inline int __attribute__((__always_inline__)) +_mm_extract_pi16 (__m64 const __A, int const __N) { - return __builtin_ia32_pextrw ((__v4hi)__A, __N); + return __builtin_ia32_vec_ext_v4hi ((__v4hi)__A, __N); } -static __inline int -_m_pextrw (__m64 __A, int __N) +static __inline int __attribute__((__always_inline__)) +_m_pextrw (__m64 const __A, int const __N) { return _mm_extract_pi16 (__A, __N); } #else -#define _mm_extract_pi16(A, N) \ - __builtin_ia32_pextrw ((__v4hi)(A), (N)) +#define _mm_extract_pi16(A, N) __builtin_ia32_vec_ext_v4hi ((__v4hi)(A), (N)) #define _m_pextrw(A, N) _mm_extract_pi16((A), (N)) #endif /* Inserts word D into one of four words of A. The selector N must be immediate. */ #if 0 -static __inline __m64 -_mm_insert_pi16 (__m64 __A, int __D, int __N) +static __inline __m64 __attribute__((__always_inline__)) +_mm_insert_pi16 (__m64 const __A, int const __D, int const __N) { - return (__m64)__builtin_ia32_pinsrw ((__v4hi)__A, __D, __N); + return (__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)__A, __D, __N); } -static __inline __m64 -_m_pinsrw (__m64 __A, int __D, int __N) +static __inline __m64 __attribute__((__always_inline__)) +_m_pinsrw (__m64 const __A, int const __D, int const __N) { return _mm_insert_pi16 (__A, __D, __N); } #else #define _mm_insert_pi16(A, D, N) \ - ((__m64) __builtin_ia32_pinsrw ((__v4hi)(A), (D), (N))) + ((__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)(A), (D), (N))) #define _m_pinsrw(A, D, N) _mm_insert_pi16((A), (D), (N)) #endif diff --git a/gcc/testsuite/gcc.target/i386/pr13366.c b/gcc/testsuite/gcc.target/i386/pr13366.c new file mode 100644 index 0000000..f0dce0b --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/pr13366.c @@ -0,0 +1,13 @@ +/* { dg-do compile } */ +/* { dg-options "-O -msse" } */ + +#include <xmmintrin.h> + +typedef unsigned short v4hi __attribute__ ((vector_size (8))); + +int f(unsigned short n) +{ + __m64 vec = (__m64)(v4hi){ 0, 0, 1, n }; + __m64 hw = _mm_mulhi_pi16 (vec, vec); + return _mm_extract_pi16 (hw, 0); +}
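As a usage note on the intrinsics rewrite above: the following is a minimal sketch, not part of the patch, showing the code paths the reworked xmmintrin.h now takes. The function name and values are illustrative; it assumes only the intrinsics visible in the hunks above and builds with the same -O -msse options as the new test.

/* Hedged sketch, not part of the patch; compile with: gcc -O -msse.  */
#include <xmmintrin.h>

float
lowest_element (float a, float b, float c, float d)
{
  /* After this patch, _mm_set_ps expands to the constructor form
     (__m128)(__v4sf){ d, c, b, a }, so element 0 holds D.  */
  __m128 v = _mm_set_ps (a, b, c, d);

  float r;
  /* _mm_store_ss now writes element 0 via __builtin_ia32_vec_ext_v4sf
     rather than the removed __builtin_ia32_storess builtin.  */
  _mm_store_ss (&r, v);
  return r;  /* Yields d.  */
}

The pr13366.c test above covers the MMX analogue: _mm_extract_pi16 now lowers through __builtin_ia32_vec_ext_v4hi, whereas the old __builtin_ia32_pextrw path is the combination PR target/13366 reports as ICEing at -O.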