author    | Jinsong Ji <jji@us.ibm.com>             | 2018-09-27 20:48:53 +0000
committer | William Schmidt <wschmidt@gcc.gnu.org>  | 2018-09-27 20:48:53 +0000
commit    | f20ac63787f7f4801c7190b4b74062d2bd481872 (patch)
tree      | 1deeee62258ad7ac0206ba824217aa73bf08d691 /gcc
parent    | f3f518f2d86c2fe15887b35882f5ae74eb0e1693 (diff)
mmintrin.h (_mm_unpackhi_pi8): Change __int128_t to __int128.
2018-09-27 Jinsong Ji <jji@us.ibm.com>
Bill Schmidt <wschmidt@linux.ibm.com>
* config/rs6000/mmintrin.h (_mm_unpackhi_pi8): Change __int128_t
to __int128.
(_mm_unpacklo_pi8): Likewise.
(_mm_add_pi8): Likewise.
(_mm_add_pi16): Likewise.
(_mm_add_pi32): Likewise.
(_mm_sub_pi8): Likewise.
(_mm_sub_pi16): Likewise.
(_mm_sub_pi32): Likewise.
(_mm_cmpgt_pi8): Likewise.
(_mm_cmpeq_pi16): Likewise.
(_mm_cmpgt_pi16): Likewise.
(_mm_cmpeq_pi32): Likewise.
(_mm_cmpgt_pi32): Likewise.
(_mm_adds_pi8): Likewise.
(_mm_adds_pi16): Likewise.
(_mm_adds_pu8): Likewise.
(_mm_adds_pu16): Likewise.
(_mm_subs_pi8): Likewise.
(_mm_subs_pi16): Likewise.
(_mm_subs_pu8): Likewise.
(_mm_subs_pu16): Likewise.
(_mm_madd_pi16): Likewise.
(_mm_mulhi_pi16): Likewise.
(_mm_mullo_pi16): Likewise.
(_mm_sll_pi16): Likewise.
(_mm_sra_pi16): Likewise.
(_mm_srl_pi16): Likewise.
(_mm_set1_pi16): Likewise.
(_mm_set1_pi8): Likewise.
* config/rs6000/xmmintrin.h (_mm_max_pi16): Likewise.
(_mm_max_pu8): Likewise.
(_mm_min_pi16): Likewise.
(_mm_min_pu8): Likewise.
From-SVN: r264678
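Every hunk below makes the same one-token change: the vector cast that feeds `__builtin_unpack_vector_int128` now spells the 128-bit type as `__int128` rather than the `__int128_t` typedef. As a point of reference, here is a minimal sketch of the post-patch pattern, assuming a PowerPC64 target with VSX (e.g. `gcc -mcpu=power8`); the function name `add_pi8_sketch` and the plain `long long` stand-in for `__m64` are illustrative assumptions, not code from the patch.

```c
/* Minimal, illustrative sketch of the pattern this patch touches,
   assuming a PowerPC64 target with VSX (e.g. gcc -mcpu=power8).
   The cast feeding __builtin_unpack_vector_int128 now names the
   __int128 keyword type instead of the __int128_t typedef; the
   builtin call itself is unchanged.  The function name and the
   long long stand-in for __m64 are assumptions for illustration.  */
#include <altivec.h>

static inline long long
add_pi8_sketch (long long m1, long long m2)
{
  __vector signed char a, b, c;

  a = (__vector signed char) vec_splats (m1);
  b = (__vector signed char) vec_splats (m2);
  c = vec_add (a, b);   /* byte-wise adds across the splatted doubleword */
  /* Pull 64-bit element 0 back out of the 128-bit vector,
     as the in-tree intrinsics do.  */
  return __builtin_unpack_vector_int128 ((__vector __int128) c, 0);
}
```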
Diffstat (limited to 'gcc')
-rw-r--r-- | gcc/config/rs6000/mmintrin.h  | 58
-rw-r--r-- | gcc/config/rs6000/xmmintrin.h |  8
2 files changed, 33 insertions, 33 deletions
diff --git a/gcc/config/rs6000/mmintrin.h b/gcc/config/rs6000/mmintrin.h
index b15dd9b..78b7cae 100644
--- a/gcc/config/rs6000/mmintrin.h
+++ b/gcc/config/rs6000/mmintrin.h
@@ -236,7 +236,7 @@ _mm_unpackhi_pi8 (__m64 __m1, __m64 __m2)
   a = (__vector unsigned char)vec_splats (__m1);
   b = (__vector unsigned char)vec_splats (__m2);
   c = vec_mergel (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 #else
   __m64_union m1, m2, res;

@@ -317,7 +317,7 @@ _mm_unpacklo_pi8 (__m64 __m1, __m64 __m2)
   a = (__vector unsigned char)vec_splats (__m1);
   b = (__vector unsigned char)vec_splats (__m2);
   c = vec_mergel (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 1));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 1));
 #else
   __m64_union m1, m2, res;

@@ -398,7 +398,7 @@ _mm_add_pi8 (__m64 __m1, __m64 __m2)
   a = (__vector signed char)vec_splats (__m1);
   b = (__vector signed char)vec_splats (__m2);
   c = vec_add (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 #else
   __m64_union m1, m2, res;

@@ -434,7 +434,7 @@ _mm_add_pi16 (__m64 __m1, __m64 __m2)
   a = (__vector signed short)vec_splats (__m1);
   b = (__vector signed short)vec_splats (__m2);
   c = vec_add (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 #else
   __m64_union m1, m2, res;

@@ -466,7 +466,7 @@ _mm_add_pi32 (__m64 __m1, __m64 __m2)
   a = (__vector signed int)vec_splats (__m1);
   b = (__vector signed int)vec_splats (__m2);
   c = vec_add (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 #else
   __m64_union m1, m2, res;

@@ -496,7 +496,7 @@ _mm_sub_pi8 (__m64 __m1, __m64 __m2)
   a = (__vector signed char)vec_splats (__m1);
   b = (__vector signed char)vec_splats (__m2);
   c = vec_sub (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 #else
   __m64_union m1, m2, res;

@@ -532,7 +532,7 @@ _mm_sub_pi16 (__m64 __m1, __m64 __m2)
   a = (__vector signed short)vec_splats (__m1);
   b = (__vector signed short)vec_splats (__m2);
   c = vec_sub (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 #else
   __m64_union m1, m2, res;

@@ -564,7 +564,7 @@ _mm_sub_pi32 (__m64 __m1, __m64 __m2)
   a = (__vector signed int)vec_splats (__m1);
   b = (__vector signed int)vec_splats (__m2);
   c = vec_sub (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 #else
   __m64_union m1, m2, res;

@@ -754,7 +754,7 @@ _mm_cmpgt_pi8 (__m64 __m1, __m64 __m2)
   a = (__vector signed char)vec_splats (__m1);
   b = (__vector signed char)vec_splats (__m2);
   c = (__vector signed char)vec_cmpgt (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 #else
   __m64_union m1, m2, res;

@@ -791,7 +791,7 @@ _mm_cmpeq_pi16 (__m64 __m1, __m64 __m2)
   a = (__vector signed short)vec_splats (__m1);
   b = (__vector signed short)vec_splats (__m2);
   c = (__vector signed short)vec_cmpeq (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 #else
   __m64_union m1, m2, res;

@@ -822,7 +822,7 @@ _mm_cmpgt_pi16 (__m64 __m1, __m64 __m2)
   a = (__vector signed short)vec_splats (__m1);
   b = (__vector signed short)vec_splats (__m2);
   c = (__vector signed short)vec_cmpgt (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 #else
   __m64_union m1, m2, res;

@@ -855,7 +855,7 @@ _mm_cmpeq_pi32 (__m64 __m1, __m64 __m2)
   a = (__vector signed int)vec_splats (__m1);
   b = (__vector signed int)vec_splats (__m2);
   c = (__vector signed int)vec_cmpeq (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 #else
   __m64_union m1, m2, res;

@@ -884,7 +884,7 @@ _mm_cmpgt_pi32 (__m64 __m1, __m64 __m2)
   a = (__vector signed int)vec_splats (__m1);
   b = (__vector signed int)vec_splats (__m2);
   c = (__vector signed int)vec_cmpgt (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 #else
   __m64_union m1, m2, res;

@@ -915,7 +915,7 @@ _mm_adds_pi8 (__m64 __m1, __m64 __m2)
   a = (__vector signed char)vec_splats (__m1);
   b = (__vector signed char)vec_splats (__m2);
   c = vec_adds (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 }

 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -933,7 +933,7 @@ _mm_adds_pi16 (__m64 __m1, __m64 __m2)
   a = (__vector signed short)vec_splats (__m1);
   b = (__vector signed short)vec_splats (__m2);
   c = vec_adds (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 }

 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -951,7 +951,7 @@ _mm_adds_pu8 (__m64 __m1, __m64 __m2)
   a = (__vector unsigned char)vec_splats (__m1);
   b = (__vector unsigned char)vec_splats (__m2);
   c = vec_adds (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 }

 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -970,7 +970,7 @@ _mm_adds_pu16 (__m64 __m1, __m64 __m2)
   a = (__vector unsigned short)vec_splats (__m1);
   b = (__vector unsigned short)vec_splats (__m2);
   c = vec_adds (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 }

 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -989,7 +989,7 @@ _mm_subs_pi8 (__m64 __m1, __m64 __m2)
   a = (__vector signed char)vec_splats (__m1);
   b = (__vector signed char)vec_splats (__m2);
   c = vec_subs (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 }

 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1008,7 +1008,7 @@ _mm_subs_pi16 (__m64 __m1, __m64 __m2)
   a = (__vector signed short)vec_splats (__m1);
   b = (__vector signed short)vec_splats (__m2);
   c = vec_subs (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 }

 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1027,7 +1027,7 @@ _mm_subs_pu8 (__m64 __m1, __m64 __m2)
   a = (__vector unsigned char)vec_splats (__m1);
   b = (__vector unsigned char)vec_splats (__m2);
   c = vec_subs (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 }

 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1046,7 +1046,7 @@ _mm_subs_pu16 (__m64 __m1, __m64 __m2)
   a = (__vector unsigned short)vec_splats (__m1);
   b = (__vector unsigned short)vec_splats (__m2);
   c = vec_subs (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 }

 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1068,7 +1068,7 @@ _mm_madd_pi16 (__m64 __m1, __m64 __m2)
   a = (__vector signed short)vec_splats (__m1);
   b = (__vector signed short)vec_splats (__m2);
   c = vec_vmsumshm (a, b, zero);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 }

 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1096,7 +1096,7 @@ _mm_mulhi_pi16 (__m64 __m1, __m64 __m2)
   w1 = vec_vmulosh (a, b);
   c = (__vector signed short)vec_perm (w0, w1, xform1);

-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 }

 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1115,7 +1115,7 @@ _mm_mullo_pi16 (__m64 __m1, __m64 __m2)
   a = (__vector signed short)vec_splats (__m1);
   b = (__vector signed short)vec_splats (__m2);
   c = a * b;
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 }

 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1136,7 +1136,7 @@ _mm_sll_pi16 (__m64 __m, __m64 __count)
       m = (__vector signed short)vec_splats (__m);
       c = (__vector unsigned short)vec_splats ((unsigned short)__count);
       r = vec_sl (m, (__vector unsigned short)c);
-      return (__builtin_unpack_vector_int128 ((__vector __int128_t)r, 0));
+      return (__builtin_unpack_vector_int128 ((__vector __int128)r, 0));
     }
   else
     return (0);
@@ -1205,7 +1205,7 @@ _mm_sra_pi16 (__m64 __m, __m64 __count)
       m = (__vector signed short)vec_splats (__m);
       c = (__vector unsigned short)vec_splats ((unsigned short)__count);
       r = vec_sra (m, (__vector unsigned short)c);
-      return (__builtin_unpack_vector_int128 ((__vector __int128_t)r, 0));
+      return (__builtin_unpack_vector_int128 ((__vector __int128)r, 0));
     }
   else
     return (0);
@@ -1274,7 +1274,7 @@ _mm_srl_pi16 (__m64 __m, __m64 __count)
       m = (__vector unsigned short)vec_splats (__m);
       c = (__vector unsigned short)vec_splats ((unsigned short)__count);
       r = vec_sr (m, (__vector unsigned short)c);
-      return (__builtin_unpack_vector_int128 ((__vector __int128_t)r, 0));
+      return (__builtin_unpack_vector_int128 ((__vector __int128)r, 0));
     }
   else
     return (0);
@@ -1417,7 +1417,7 @@ _mm_set1_pi16 (short __w)
   __vector signed short w;

   w = (__vector signed short)vec_splats (__w);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)w, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)w, 0));
 #else
   __m64_union res;

@@ -1437,7 +1437,7 @@ _mm_set1_pi8 (signed char __b)
   __vector signed char b;

   b = (__vector signed char)vec_splats (__b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)b, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)b, 0));
 #else
   __m64_union res;

diff --git a/gcc/config/rs6000/xmmintrin.h b/gcc/config/rs6000/xmmintrin.h
index 11ecbd8..ed6f121 100644
--- a/gcc/config/rs6000/xmmintrin.h
+++ b/gcc/config/rs6000/xmmintrin.h
@@ -1421,7 +1421,7 @@ _mm_max_pi16 (__m64 __A, __m64 __B)
   b = (__vector signed short)vec_splats (__B);
   c = (__vector __bool short)vec_cmpgt (a, b);
   r = vec_sel (b, a, c);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)r, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)r, 0));
 #else
   __m64_union m1, m2, res;

@@ -1459,7 +1459,7 @@ _mm_max_pu8 (__m64 __A, __m64 __B)
   b = (__vector unsigned char)vec_splats (__B);
   c = (__vector __bool char)vec_cmpgt (a, b);
   r = vec_sel (b, a, c);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)r, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)r, 0));
 #else
   __m64_union m1, m2, res;
   long i;
@@ -1495,7 +1495,7 @@ _mm_min_pi16 (__m64 __A, __m64 __B)
   b = (__vector signed short)vec_splats (__B);
   c = (__vector __bool short)vec_cmplt (a, b);
   r = vec_sel (b, a, c);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)r, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)r, 0));
 #else
   __m64_union m1, m2, res;

@@ -1533,7 +1533,7 @@ _mm_min_pu8 (__m64 __A, __m64 __B)
   b = (__vector unsigned char)vec_splats (__B);
   c = (__vector __bool char)vec_cmplt (a, b);
   r = vec_sel (b, a, c);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)r, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)r, 0));
 #else
   __m64_union m1, m2, res;
   long i;