author     Martin Liska <mliska@suse.cz>    2021-08-27 10:52:00 +0200
committer  Martin Liska <mliska@suse.cz>    2021-08-27 10:52:00 +0200
commit     e07d0e579a4e532ac4bd2d223105d73d6418868f (patch)
tree       fea3daf24b89b9d1872aa12a6f85d89caeb1a58c
parent     e88d1c83cdd1d349dc34f402e92363ba9393ee46 (diff)
parent     44a545a6abdd330083c1d12ad70092defbba702a (diff)
Merge branch 'master' into devel/sphinx
47 files changed, 8174 insertions, 334 deletions
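Note: the most algorithmically dense change in this merge is the i386.c fold of the SHUFPS/SHUFPD builtins (now all vector lengths, including masked forms when the mask is all ones) into VEC_PERM_EXPR. The standalone C sketch below mirrors the two bit computations involved; it is illustrative only, not GCC code, and the helper names (mask_all_ones, sel_idx) are invented for this note. It assumes elems < 64.

#include <stdio.h>

/* All-ones test mirroring the new ix86_masked_all_ones: MASK covers
   ELEMS vector lanes; it is "all ones" iff every low ELEMS bit is set.  */
static int
mask_all_ones (unsigned elems, unsigned long long mask)
{
  return (mask | (~0ULL << elems)) == ~0ULL;
}

/* Select index for destination element I, mirroring the new loop in
   ix86_gimple_fold_builtin: IS_DF != 0 for SHUFPD (1 immediate bit per
   element), 0 for SHUFPS (2 immediate bits per element, with Imm[7:0]
   reused for every 128-bit lane).  */
static unsigned
sel_idx (int is_df, unsigned elems, unsigned mask, unsigned i)
{
  if (is_df)
    return (i & 1) * elems + (i & ~1u) + ((mask >> i) & 1);
  unsigned j = i % 4;
  return ((i >> 1) & 1) * elems + (i & ~3u) + ((mask >> 2 * j) & 3);
}

int
main (void)
{
  /* shufpd (a, b, 1) on V2DF: prints 1 2, i.e. { a[1], b[0] },
     matching the old hard-coded V2DI path.  */
  for (unsigned i = 0; i < 2; i++)
    printf ("%u ", sel_idx (1, 2, 1, i));
  /* shufps (a, b, 0x1b) on V4SF: prints 3 2 5 4.  */
  for (unsigned i = 0; i < 4; i++)
    printf ("%u ", sel_idx (0, 4, 0x1b, i));
  /* 0xf covers all four lanes, so the masked form may be folded.  */
  printf ("\n%d\n", mask_all_ones (4, 0xf));
  return 0;
}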
diff --git a/contrib/ChangeLog b/contrib/ChangeLog
index d496dc6..eeedef32 100644
--- a/contrib/ChangeLog
+++ b/contrib/ChangeLog
@@ -1,3 +1,8 @@
+2021-08-26  Martin Liska  <mliska@suse.cz>
+
+	* mklog.py: Use file.{source,target}_file for proper rename
+	handling.
+
 2021-08-24  Andrew Pinski  <apinski@marvell.com>
 
 	PR other/82704
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index fe8242b..4e7991e 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,99 @@
+2021-08-26  Uroš Bizjak  <ubizjak@gmail.com>
+
+	* config/i386/i386.md (*btr<mode>_1): Call force_reg unconditionally.
+	(conditional moves with memory inputs splitters): Ditto.
+	* config/i386/sse.md (one_cmpl<mode>2): Simplify.
+
+2021-08-26  Jan Hubicka  <hubicka@ucw.cz>
+
+	* ipa-modref-tree.h (modref_access_node::try_merge_with): Restart
+	search after merging.
+
+2021-08-26  Bill Schmidt  <wschmidt@linux.ibm.com>
+
+	* config/rs6000/rs6000-overload.def: Add remaining overloads.
+
+2021-08-26  Bill Schmidt  <wschmidt@linux.ibm.com>
+
+	* config/rs6000/rs6000-builtin-new.def: Add cell stanza.
+
+2021-08-26  Bill Schmidt  <wschmidt@linux.ibm.com>
+
+	* config/rs6000/rs6000-builtin-new.def: Add ieee128-hw, dfp,
+	crypto, and htm stanzas.
+
+2021-08-26  Bill Schmidt  <wschmidt@linux.ibm.com>
+
+	* config/rs6000/rs6000-builtin-new.def: Add mma stanza.
+
+2021-08-26  Martin Sebor  <msebor@redhat.com>
+
+	* tree-ssa-uninit.c (warn_uninit): Refactor and simplify.
+	(warn_uninit_phi_uses): Remove argument from calls to warn_uninit.
+	(warn_uninitialized_vars): Same.  Reduce visibility of locals.
+	(warn_uninitialized_phi): Same.
+
+2021-08-26  Roger Sayle  <roger@nextmovesoftware.com>
+
+	* tree-ssa-ccp.c (get_individual_bits): Helper function to
+	extract the individual bits from a widest_int constant (mask).
+	(gray_code_bit_flips): New read-only table for efficiently
+	enumerating permutations/combinations of bits.
+	(bit_value_binop) [LROTATE_EXPR, RROTATE_EXPR]: Handle rotates
+	by unknown counts that are guaranteed less than the target
+	precision and four or fewer unknown bits by enumeration.
+	[LSHIFT_EXPR, RSHIFT_EXPR]: Likewise, also handle shifts by
+	enumeration under the same conditions.  Handle remaining
+	shifts as a mask based upon the minimum possible shift value.
+
+2021-08-26  Roger Sayle  <roger@nextmovesoftware.com>
+	    Richard Biener  <rguenther@suse.de>
+
+	* match.pd (shift transformations): Remove a redundant
+	!POINTER_TYPE_P check.
+
+2021-08-26  Uroš Bizjak  <ubizjak@gmail.com>
+
+	PR target/102057
+	* config/i386/i386.md (cmove reg-reg move elimination peephole2s):
+	Set all_regs to true in the call to replace_rtx.
+
+2021-08-26  Jan Hubicka  <hubicka@ucw.cz>
+
+	* ipa-modref-tree.c (test_insert_search_collapse): Update test.
+	* ipa-modref-tree.h (modref_base_node::insert): Be smarter when
+	hitting --param modref-max-refs limit.
+	(modref_tree::insert_base): Be smarter when hitting
+	--param modref-max-bases limit.  Add new parameter REF.
+	(modref_tree::insert): Update.
+	(modref_tree::merge): Update.
+	* ipa-modref.c (read_modref_records): Update.
+
+2021-08-26  Jan Hubicka  <hubicka@ucw.cz>
+
+	* params.opt (modref-max-adjustments): Add full stop.
+
+2021-08-26  Jan Hubicka  <hubicka@ucw.cz>
+
+	* ipa-modref-tree.h (modref_ref_node::verify): New member
+	function.
+	(modref_ref_node::insert): Use it.
+	(modref_ref_node::try_merge_with): Fix off-by-one error.
+
+2021-08-26  Martin Liska  <mliska@suse.cz>
+	    Stefan Kneifel  <stefan.kneifel@bluewin.ch>
+
+	* cgraph.h (create_version_clone_with_body): Add new parameter.
+	* cgraphclones.c: Likewise.
+	* multiple_target.c (create_dispatcher_calls): Do not use
+	numbered suffixes.
+	(create_target_clone): Likewise here.
+
+2021-08-26  Jonathan Yong  <10walls@gmail.com>
+
+	* doc/extend.texi: Add note about reserved priorities
+	to the constructor attribute.
+
 2021-08-25  Martin Sebor  <msebor@redhat.com>
 
 	* gimple-range-cache.cc (ssa_global_cache::dump): Avoid printing
diff --git a/gcc/DATESTAMP b/gcc/DATESTAMP
index 98be008..ac2bf5d 100644
--- a/gcc/DATESTAMP
+++ b/gcc/DATESTAMP
@@ -1 +1 @@
-20210826
+20210827
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index ddbbbce..3bb2cab 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -17559,6 +17559,21 @@ ix86_vector_shift_count (tree arg1)
   return NULL_TREE;
 }
 
+/* Return true if ARG_MASK is all ones.  ELEMS is the number of elements
+   in the corresponding vector.  */
+static bool
+ix86_masked_all_ones (unsigned HOST_WIDE_INT elems, tree arg_mask)
+{
+  if (TREE_CODE (arg_mask) != INTEGER_CST)
+    return false;
+
+  unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (arg_mask);
+  if ((mask | (HOST_WIDE_INT_M1U << elems)) != HOST_WIDE_INT_M1U)
+    return false;
+
+  return true;
+}
+
 static tree
 ix86_fold_builtin (tree fndecl, int n_args, tree *args,
 		   bool ignore ATTRIBUTE_UNUSED)
@@ -18044,6 +18059,7 @@ ix86_gimple_fold_builtin (gimple_stmt_iterator *gsi)
   enum tree_code tcode;
   unsigned HOST_WIDE_INT count;
   bool is_vshift;
+  unsigned HOST_WIDE_INT elems;
 
   switch (fn_code)
     {
@@ -18367,17 +18383,11 @@ ix86_gimple_fold_builtin (gimple_stmt_iterator *gsi)
       gcc_assert (n_args >= 2);
       arg0 = gimple_call_arg (stmt, 0);
       arg1 = gimple_call_arg (stmt, 1);
-      if (n_args > 2)
-	{
-	  /* This is masked shift.  Only optimize if the mask is all ones.  */
-	  tree argl = gimple_call_arg (stmt, n_args - 1);
-	  if (!tree_fits_uhwi_p (argl))
-	    break;
-	  unsigned HOST_WIDE_INT mask = tree_to_uhwi (argl);
-	  unsigned elems = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0));
-	  if ((mask | (HOST_WIDE_INT_M1U << elems)) != HOST_WIDE_INT_M1U)
-	    break;
-	}
+      elems = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0));
+      /* For masked shift, only optimize if the mask is all ones.  */
+      if (n_args > 2
+	  && !ix86_masked_all_ones (elems, gimple_call_arg (stmt, n_args - 1)))
+	break;
       if (is_vshift)
	{
	  if (TREE_CODE (arg1) != VECTOR_CST)
@@ -18426,25 +18436,62 @@ ix86_gimple_fold_builtin (gimple_stmt_iterator *gsi)
	}
       break;
 
+    case IX86_BUILTIN_SHUFPD512:
+    case IX86_BUILTIN_SHUFPS512:
     case IX86_BUILTIN_SHUFPD:
+    case IX86_BUILTIN_SHUFPD256:
+    case IX86_BUILTIN_SHUFPS:
+    case IX86_BUILTIN_SHUFPS256:
+      arg0 = gimple_call_arg (stmt, 0);
+      elems = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0));
+      /* This is a masked shuffle.  Only optimize if the mask is all ones.  */
+      if (n_args > 3
+	  && !ix86_masked_all_ones (elems,
+				    gimple_call_arg (stmt, n_args - 1)))
+	break;
       arg2 = gimple_call_arg (stmt, 2);
       if (TREE_CODE (arg2) == INTEGER_CST)
	{
+	  unsigned HOST_WIDE_INT shuffle_mask = TREE_INT_CST_LOW (arg2);
+	  /* Check valid imm, refer to gcc.target/i386/testimm-10.c.  */
+	  if (shuffle_mask > 255)
+	    return false;
+
+	  machine_mode imode = GET_MODE_INNER (TYPE_MODE (TREE_TYPE (arg0)));
	  location_t loc = gimple_location (stmt);
-	  unsigned HOST_WIDE_INT imask = TREE_INT_CST_LOW (arg2);
-	  arg0 = gimple_call_arg (stmt, 0);
+	  tree itype = (imode == E_DFmode
+			? long_long_integer_type_node : integer_type_node);
+	  tree vtype = build_vector_type (itype, elems);
+	  tree_vector_builder elts (vtype, elems, 1);
+
+	  /* Transform integer shuffle_mask to vector perm_mask which
+	     is used by vec_perm_expr, refer to shufp[sd]256/512 in sse.md.  */
+	  for (unsigned i = 0; i != elems; i++)
+	    {
+	      unsigned sel_idx;
+	      /* Imm[1:0] (if VL > 128, then use Imm[3:2], Imm[5:4], Imm[7:6])
+		 provides 2 select controls for each element of the
+		 destination.  */
+	      if (imode == E_DFmode)
+		sel_idx = (i & 1) * elems + (i & ~1)
+			  + ((shuffle_mask >> i) & 1);
+	      else
+		{
+		  /* Imm[7:0] (if VL > 128, also use Imm[7:0]) provides 4
+		     select controls for each element of the destination.  */
+		  unsigned j = i % 4;
+		  sel_idx = ((i >> 1) & 1) * elems + (i & ~3)
+			    + ((shuffle_mask >> 2 * j) & 3);
+		}
+	      elts.quick_push (build_int_cst (itype, sel_idx));
+	    }
+
+	  tree perm_mask = elts.build ();
	  arg1 = gimple_call_arg (stmt, 1);
-	  tree itype = long_long_integer_type_node;
-	  tree vtype = build_vector_type (itype, 2); /* V2DI */
-	  tree_vector_builder elts (vtype, 2, 1);
-	  /* Ignore bits other than the lowest 2.  */
-	  elts.quick_push (build_int_cst (itype, imask & 1));
-	  imask >>= 1;
-	  elts.quick_push (build_int_cst (itype, 2 + (imask & 1)));
-	  tree omask = elts.build ();
	  gimple *g = gimple_build_assign (gimple_call_lhs (stmt),
					   VEC_PERM_EXPR,
-					   arg0, arg1, omask);
+					   arg0, arg1, perm_mask);
	  gimple_set_location (g, loc);
	  gsi_replace (gsi, g, false);
	  return true;
diff --git a/gcc/config/i386/i386.md b/gcc/config/i386/i386.md
index 41d8562..528116d 100644
--- a/gcc/config/i386/i386.md
+++ b/gcc/config/i386/i386.md
@@ -12728,8 +12728,7 @@
    (clobber (reg:CC FLAGS_REG))])]
{
  operands[0] = lowpart_subreg (SImode, operands[0], <MODE>mode);
-  if (MEM_P (operands[1]))
-    operands[1] = force_reg (<MODE>mode, operands[1]);
+  operands[1] = force_reg (<MODE>mode, operands[1]);
  operands[1] = lowpart_subreg (SImode, operands[1], <MODE>mode);
})
@@ -19352,10 +19351,8 @@
  [(set (match_dup 0)
	(if_then_else:SWI248 (match_dup 1) (match_dup 2) (match_dup 3)))]
{
-  if (MEM_P (operands[2]))
-    operands[2] = force_reg (<MODE>mode, operands[2]);
-  if (MEM_P (operands[3]))
-    operands[3] = force_reg (<MODE>mode, operands[3]);
+  operands[2] = force_reg (<MODE>mode, operands[2]);
+  operands[3] = force_reg (<MODE>mode, operands[3]);
})

(define_insn "*movqicc_noc"
@@ -19475,8 +19472,8 @@
		    (match_dup 0)))]
{
  operands[7] = SET_DEST (XVECEXP (PATTERN (peep2_next_insn (1)), 0, 0));
-  operands[8] = replace_rtx (operands[5], operands[0], operands[1]);
-  operands[9] = replace_rtx (operands[6], operands[0], operands[1]);
+  operands[8] = replace_rtx (operands[5], operands[0], operands[1], true);
+  operands[9] = replace_rtx (operands[6], operands[0], operands[1], true);
})

;; Eliminate a reg-reg mov by inverting the condition of a cmov (#2).
@@ -19507,8 +19504,8 @@
		    (match_dup 0)))]
{
  operands[7] = SET_DEST (XVECEXP (PATTERN (peep2_next_insn (2)), 0, 0));
-  operands[8] = replace_rtx (operands[5], operands[0], operands[1]);
-  operands[9] = replace_rtx (operands[6], operands[0], operands[1]);
+  operands[8] = replace_rtx (operands[5], operands[0], operands[1], true);
+  operands[9] = replace_rtx (operands[6], operands[0], operands[1], true);
})

(define_expand "mov<mode>cc"
@@ -19603,10 +19600,8 @@
  [(set (match_dup 0)
	(if_then_else:MODEF (match_dup 1) (match_dup 2) (match_dup 3)))]
{
-  if (MEM_P (operands[2]))
-    operands[2] = force_reg (<MODE>mode, operands[2]);
-  if (MEM_P (operands[3]))
-    operands[3] = force_reg (<MODE>mode, operands[3]);
+  operands[2] = force_reg (<MODE>mode, operands[2]);
+  operands[3] = force_reg (<MODE>mode, operands[3]);
})

;; Don't do conditional moves with memory inputs
diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
index 03fc2df..ac0c463 100644
--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
@@ -14318,10 +14318,10 @@
	  (match_dup 2)))]
  "TARGET_SSE"
{
+  operands[2] = CONSTM1_RTX (<MODE>mode);
+
  if (!TARGET_AVX512F)
-    operands[2] = force_reg (<MODE>mode, CONSTM1_RTX (<MODE>mode));
-  else
-    operands[2] = CONSTM1_RTX (<MODE>mode);
+    operands[2] = force_reg (<MODE>mode, operands[2]);
})

(define_insn "<mask_codefor>one_cmpl<mode>2<mask_name>"
@@ -24205,8 +24205,9 @@
  "TARGET_AVX512F"
{
  operands[5]
-    = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, operands[0], operands[2],
-					operands[4]), UNSPEC_VSIBADDR);
+    = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, operands[0], operands[2],
+					operands[4], operands[1]),
+		      UNSPEC_VSIBADDR);
})

(define_insn "*avx512f_scattersi<VI48F:mode>"
@@ -24214,10 +24215,11 @@
	  [(unspec:P
	     [(match_operand:P 0 "vsib_address_operand" "Tv")
	      (match_operand:<VEC_GATHER_IDXSI> 2 "register_operand" "v")
-	      (match_operand:SI 4 "const1248_operand" "n")]
+	      (match_operand:SI 4 "const1248_operand" "n")
+	      (match_operand:<avx512fmaskmode> 6 "register_operand" "1")]
	     UNSPEC_VSIBADDR)])
	(unspec:VI48F
-	  [(match_operand:<avx512fmaskmode> 6 "register_operand" "1")
+	  [(match_dup 6)
	   (match_operand:VI48F 3 "register_operand" "v")]
	  UNSPEC_SCATTER))
   (clobber (match_scratch:<avx512fmaskmode> 1 "=&Yk"))]
@@ -24243,8 +24245,9 @@
  "TARGET_AVX512F"
{
  operands[5]
-    = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, operands[0], operands[2],
-					operands[4]), UNSPEC_VSIBADDR);
+    = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, operands[0], operands[2],
+					operands[4], operands[1]),
+		      UNSPEC_VSIBADDR);
})

(define_insn "*avx512f_scatterdi<VI48F:mode>"
@@ -24252,10 +24255,11 @@
	  [(unspec:P
	     [(match_operand:P 0 "vsib_address_operand" "Tv")
	      (match_operand:<VEC_GATHER_IDXDI> 2 "register_operand" "v")
-	      (match_operand:SI 4 "const1248_operand" "n")]
+	      (match_operand:SI 4 "const1248_operand" "n")
+	      (match_operand:QI 6 "register_operand" "1")]
	     UNSPEC_VSIBADDR)])
	(unspec:VI48F
-	  [(match_operand:QI 6 "register_operand" "1")
+	  [(match_dup 6)
	   (match_operand:<VEC_GATHER_SRCDI> 3 "register_operand" "v")]
	  UNSPEC_SCATTER))
   (clobber (match_scratch:QI 1 "=&Yk"))]
diff --git a/gcc/config/rs6000/rs6000-builtin-new.def b/gcc/config/rs6000/rs6000-builtin-new.def
index b6fc994..3e732ce 100644
--- a/gcc/config/rs6000/rs6000-builtin-new.def
+++ b/gcc/config/rs6000/rs6000-builtin-new.def
@@ -1106,6 +1106,33 @@
     VEC_SET_V8HI nothing {set}
 
+; Cell builtins.
+[cell]
+  pure vsc __builtin_altivec_lvlx (signed long, const void *);
+    LVLX altivec_lvlx {ldvec}
+
+  pure vsc __builtin_altivec_lvlxl (signed long, const void *);
+    LVLXL altivec_lvlxl {ldvec}
+
+  pure vsc __builtin_altivec_lvrx (signed long, const void *);
+    LVRX altivec_lvrx {ldvec}
+
+  pure vsc __builtin_altivec_lvrxl (signed long, const void *);
+    LVRXL altivec_lvrxl {ldvec}
+
+  void __builtin_altivec_stvlx (vsc, signed long, void *);
+    STVLX altivec_stvlx {stvec}
+
+  void __builtin_altivec_stvlxl (vsc, signed long, void *);
+    STVLXL altivec_stvlxl {stvec}
+
+  void __builtin_altivec_stvrx (vsc, signed long, void *);
+    STVRX altivec_stvrx {stvec}
+
+  void __builtin_altivec_stvrxl (vsc, signed long, void *);
+    STVRXL altivec_stvrxl {stvec}
+
 ; VSX builtins.
 [vsx]
   pure vd __builtin_altivec_lvx_v2df (signed long, const void *);
@@ -2808,6 +2835,221 @@
     XL_LEN_R xl_len_r {}
 
+; Builtins requiring hardware support for IEEE-128 floating-point.
+[ieee128-hw]
+  fpmath _Float128 __builtin_addf128_round_to_odd (_Float128, _Float128);
+    ADDF128_ODD addkf3_odd {}
+
+  fpmath _Float128 __builtin_divf128_round_to_odd (_Float128, _Float128);
+    DIVF128_ODD divkf3_odd {}
+
+  fpmath _Float128 __builtin_fmaf128_round_to_odd (_Float128, _Float128, _Float128);
+    FMAF128_ODD fmakf4_odd {}
+
+  fpmath _Float128 __builtin_mulf128_round_to_odd (_Float128, _Float128);
+    MULF128_ODD mulkf3_odd {}
+
+  const signed int __builtin_vsx_scalar_cmp_exp_qp_eq (_Float128, _Float128);
+    VSCEQPEQ xscmpexpqp_eq_kf {}
+
+  const signed int __builtin_vsx_scalar_cmp_exp_qp_gt (_Float128, _Float128);
+    VSCEQPGT xscmpexpqp_gt_kf {}
+
+  const signed int __builtin_vsx_scalar_cmp_exp_qp_lt (_Float128, _Float128);
+    VSCEQPLT xscmpexpqp_lt_kf {}
+
+  const signed int __builtin_vsx_scalar_cmp_exp_qp_unordered (_Float128, _Float128);
+    VSCEQPUO xscmpexpqp_unordered_kf {}
+
+  fpmath _Float128 __builtin_sqrtf128_round_to_odd (_Float128);
+    SQRTF128_ODD sqrtkf2_odd {}
+
+  fpmath _Float128 __builtin_subf128_round_to_odd (_Float128, _Float128);
+    SUBF128_ODD subkf3_odd {}
+
+  fpmath double __builtin_truncf128_round_to_odd (_Float128);
+    TRUNCF128_ODD trunckfdf2_odd {}
+
+  const signed long long __builtin_vsx_scalar_extract_expq (_Float128);
+    VSEEQP xsxexpqp_kf {}
+
+  const signed __int128 __builtin_vsx_scalar_extract_sigq (_Float128);
+    VSESQP xsxsigqp_kf {}
+
+  const _Float128 __builtin_vsx_scalar_insert_exp_q (unsigned __int128, unsigned long long);
+    VSIEQP xsiexpqp_kf {}
+
+  const _Float128 __builtin_vsx_scalar_insert_exp_qp (_Float128, unsigned long long);
+    VSIEQPF xsiexpqpf_kf {}
+
+  const signed int __builtin_vsx_scalar_test_data_class_qp (_Float128, const int<7>);
+    VSTDCQP xststdcqp_kf {}
+
+  const signed int __builtin_vsx_scalar_test_neg_qp (_Float128);
+    VSTDCNQP xststdcnegqp_kf {}
+
+
+; Decimal floating-point builtins.
+[dfp]
+  const _Decimal64 __builtin_ddedpd (const int<2>, _Decimal64);
+    DDEDPD dfp_ddedpd_dd {}
+
+  const _Decimal128 __builtin_ddedpdq (const int<2>, _Decimal128);
+    DDEDPDQ dfp_ddedpd_td {}
+
+  const _Decimal64 __builtin_denbcd (const int<1>, _Decimal64);
+    DENBCD dfp_denbcd_dd {}
+
+  const _Decimal128 __builtin_denbcdq (const int<1>, _Decimal128);
+    DENBCDQ dfp_denbcd_td {}
+
+  const _Decimal128 __builtin_denb2dfp_v16qi (vsc);
+    DENB2DFP_V16QI dfp_denbcd_v16qi {}
+
+  const _Decimal64 __builtin_diex (signed long long, _Decimal64);
+    DIEX dfp_diex_dd {}
+
+  const _Decimal128 __builtin_diexq (signed long long, _Decimal128);
+    DIEXQ dfp_diex_td {}
+
+  const _Decimal64 __builtin_dscli (_Decimal64, const int<6>);
+    DSCLI dfp_dscli_dd {}
+
+  const _Decimal128 __builtin_dscliq (_Decimal128, const int<6>);
+    DSCLIQ dfp_dscli_td {}
+
+  const _Decimal64 __builtin_dscri (_Decimal64, const int<6>);
+    DSCRI dfp_dscri_dd {}
+
+  const _Decimal128 __builtin_dscriq (_Decimal128, const int<6>);
+    DSCRIQ dfp_dscri_td {}
+
+  const signed long long __builtin_dxex (_Decimal64);
+    DXEX dfp_dxex_dd {}
+
+  const signed long long __builtin_dxexq (_Decimal128);
+    DXEXQ dfp_dxex_td {}
+
+  const _Decimal128 __builtin_pack_dec128 (unsigned long long, unsigned long long);
+    PACK_TD packtd {}
+
+  void __builtin_set_fpscr_drn (const int[0,7]);
+    SET_FPSCR_DRN rs6000_set_fpscr_drn {}
+
+  const unsigned long __builtin_unpack_dec128 (_Decimal128, const int<1>);
+    UNPACK_TD unpacktd {}
+
+
+[crypto]
+  const vull __builtin_crypto_vcipher (vull, vull);
+    VCIPHER crypto_vcipher_v2di {}
+
+  const vuc __builtin_crypto_vcipher_be (vuc, vuc);
+    VCIPHER_BE crypto_vcipher_v16qi {}
+
+  const vull __builtin_crypto_vcipherlast (vull, vull);
+    VCIPHERLAST crypto_vcipherlast_v2di {}
+
+  const vuc __builtin_crypto_vcipherlast_be (vuc, vuc);
+    VCIPHERLAST_BE crypto_vcipherlast_v16qi {}
+
+  const vull __builtin_crypto_vncipher (vull, vull);
+    VNCIPHER crypto_vncipher_v2di {}
+
+  const vuc __builtin_crypto_vncipher_be (vuc, vuc);
+    VNCIPHER_BE crypto_vncipher_v16qi {}
+
+  const vull __builtin_crypto_vncipherlast (vull, vull);
+    VNCIPHERLAST crypto_vncipherlast_v2di {}
+
+  const vuc __builtin_crypto_vncipherlast_be (vuc, vuc);
+    VNCIPHERLAST_BE crypto_vncipherlast_v16qi {}
+
+  const vull __builtin_crypto_vsbox (vull);
+    VSBOX crypto_vsbox_v2di {}
+
+  const vuc __builtin_crypto_vsbox_be (vuc);
+    VSBOX_BE crypto_vsbox_v16qi {}
+
+  const vull __builtin_crypto_vshasigmad (vull, const int<1>, const int<4>);
+    VSHASIGMAD crypto_vshasigmad {}
+
+  const vui __builtin_crypto_vshasigmaw (vui, const int<1>, const int<4>);
+    VSHASIGMAW crypto_vshasigmaw {}
+
+
+[htm]
+  unsigned long long __builtin_get_texasr ();
+    GET_TEXASR nothing {htm,htmspr}
+
+  unsigned long long __builtin_get_texasru ();
+    GET_TEXASRU nothing {htm,htmspr}
+
+  unsigned long long __builtin_get_tfhar ();
+    GET_TFHAR nothing {htm,htmspr}
+
+  unsigned long long __builtin_get_tfiar ();
+    GET_TFIAR nothing {htm,htmspr}
+
+  void __builtin_set_texasr (unsigned long long);
+    SET_TEXASR nothing {htm,htmspr}
+
+  void __builtin_set_texasru (unsigned long long);
+    SET_TEXASRU nothing {htm,htmspr}
+
+  void __builtin_set_tfhar (unsigned long long);
+    SET_TFHAR nothing {htm,htmspr}
+
+  void __builtin_set_tfiar (unsigned long long);
+    SET_TFIAR nothing {htm,htmspr}
+
+  unsigned int __builtin_tabort (unsigned int);
+    TABORT tabort {htm,htmcr}
+
+  unsigned int __builtin_tabortdc (unsigned long long, unsigned long long, unsigned long long);
+    TABORTDC tabortdc {htm,htmcr}
+
+  unsigned int __builtin_tabortdci (unsigned long long, unsigned long long, unsigned long long);
+    TABORTDCI tabortdci {htm,htmcr}
+
+  unsigned int __builtin_tabortwc (unsigned int, unsigned int, unsigned int);
+    TABORTWC tabortwc {htm,htmcr}
+
+  unsigned int __builtin_tabortwci (unsigned int, unsigned int, unsigned int);
+    TABORTWCI tabortwci {htm,htmcr}
+
+  unsigned int __builtin_tbegin (unsigned int);
+    TBEGIN tbegin {htm,htmcr}
+
+  unsigned int __builtin_tcheck ();
+    TCHECK tcheck {htm,htmcr}
+
+  unsigned int __builtin_tend (unsigned int);
+    TEND tend {htm,htmcr}
+
+  unsigned int __builtin_tendall ();
+    TENDALL tend {htm,htmcr}
+
+  unsigned int __builtin_trechkpt ();
+    TRECHKPT trechkpt {htm,htmcr}
+
+  unsigned int __builtin_treclaim (unsigned int);
+    TRECLAIM treclaim {htm,htmcr}
+
+  unsigned int __builtin_tresume ();
+    TRESUME tsr {htm,htmcr}
+
+  unsigned int __builtin_tsr (unsigned int);
+    TSR tsr {htm,htmcr}
+
+  unsigned int __builtin_tsuspend ();
+    TSUSPEND tsr {htm,htmcr}
+
+  unsigned int __builtin_ttest ();
+    TTEST ttest {htm,htmcr}
+
+
 [power10]
   const vbq __builtin_altivec_cmpge_1ti (vsq, vsq);
     CMPGE_1TI vector_nltv1ti {}
@@ -3329,3 +3571,419 @@
   const unsigned long long __builtin_pextd (unsigned long long, unsigned long long);
     PEXTD pextd {}
+
+
+[mma]
+  void __builtin_mma_assemble_acc (v512 *, vuc, vuc, vuc, vuc);
+    ASSEMBLE_ACC nothing {mma}
+
+  v512 __builtin_mma_assemble_acc_internal (vuc, vuc, vuc, vuc);
+    ASSEMBLE_ACC_INTERNAL mma_assemble_acc {mma}
+
+  void __builtin_mma_assemble_pair (v256 *, vuc, vuc);
+    ASSEMBLE_PAIR nothing {mma}
+
+  v256 __builtin_mma_assemble_pair_internal (vuc, vuc);
+    ASSEMBLE_PAIR_INTERNAL vsx_assemble_pair {mma}
+
+  void __builtin_mma_build_acc (v512 *, vuc, vuc, vuc, vuc);
+    BUILD_ACC nothing {mma}
+
+  v512 __builtin_mma_build_acc_internal (vuc, vuc, vuc, vuc);
+    BUILD_ACC_INTERNAL mma_assemble_acc {mma}
+
+  void __builtin_mma_disassemble_acc (void *, v512 *);
+    DISASSEMBLE_ACC nothing {mma,quad}
+
+  vuc __builtin_mma_disassemble_acc_internal (v512, const int<2>);
+    DISASSEMBLE_ACC_INTERNAL mma_disassemble_acc {mma}
+
+  void __builtin_mma_disassemble_pair (void *, v256 *);
+    DISASSEMBLE_PAIR nothing {mma,pair}
+
+  vuc __builtin_mma_disassemble_pair_internal (v256, const int<2>);
+    DISASSEMBLE_PAIR_INTERNAL vsx_disassemble_pair {mma}
+
+  void __builtin_mma_pmxvbf16ger2 (v512 *, vuc, vuc, const int<4>, const int<4>, const int<2>);
+    PMXVBF16GER2 nothing {mma}
+
+  v512 __builtin_mma_pmxvbf16ger2_internal (vuc, vuc, const int<4>, const int<4>, const int<2>);
+    PMXVBF16GER2_INTERNAL mma_pmxvbf16ger2 {mma}
+
+  void __builtin_mma_pmxvbf16ger2nn (v512 *, vuc, vuc, const int<4>, const int<4>, const int<2>);
+    PMXVBF16GER2NN nothing {mma,quad}
+
+  v512 __builtin_mma_pmxvbf16ger2nn_internal (v512, vuc, vuc, const int<4>, const int<4>, const int<2>);
+    PMXVBF16GER2NN_INTERNAL mma_pmxvbf16ger2nn {mma,quad}
+
+  void __builtin_mma_pmxvbf16ger2np (v512 *, vuc, vuc, const int<4>, const int<4>, const int<2>);
+    PMXVBF16GER2NP nothing {mma,quad}
+
+  v512 __builtin_mma_pmxvbf16ger2np_internal (v512, vuc, vuc, const int<4>, const int<4>, const int<2>);
+    PMXVBF16GER2NP_INTERNAL mma_pmxvbf16ger2np {mma,quad}
+
+  void __builtin_mma_pmxvbf16ger2pn (v512 *, vuc, vuc, const int<4>, const int<4>, const int<2>);
+    PMXVBF16GER2PN nothing {mma,quad}
+
+  v512 __builtin_mma_pmxvbf16ger2pn_internal (v512, vuc, vuc, const int<4>, const int<4>, const int<2>);
+    PMXVBF16GER2PN_INTERNAL mma_pmxvbf16ger2pn {mma,quad}
+
+  void __builtin_mma_pmxvbf16ger2pp (v512 *, vuc, vuc, const int<4>, const int<4>, const int<2>);
+    PMXVBF16GER2PP nothing {mma,quad}
+
+  v512 __builtin_mma_pmxvbf16ger2pp_internal (v512, vuc, vuc, const int<4>, const int<4>, const int<2>);
+    PMXVBF16GER2PP_INTERNAL mma_pmxvbf16ger2pp {mma,quad}
+
+  void __builtin_mma_pmxvf16ger2 (v512 *, vuc, vuc, const int<4>, const int<4>, const int<2>);
+    PMXVF16GER2 nothing {mma}
+
+  v512 __builtin_mma_pmxvf16ger2_internal (vuc, vuc, const int<4>, const int<4>, const int<2>);
+    PMXVF16GER2_INTERNAL mma_pmxvf16ger2 {mma}
+
+  void __builtin_mma_pmxvf16ger2nn (v512 *, vuc, vuc, const int<4>, const int<4>, const int<2>);
+    PMXVF16GER2NN nothing {mma,quad}
+
+  v512 __builtin_mma_pmxvf16ger2nn_internal (v512, vuc, vuc, const int<4>, const int<4>, const int<2>);
+    PMXVF16GER2NN_INTERNAL mma_pmxvf16ger2nn {mma,quad}
+
+  void __builtin_mma_pmxvf16ger2np (v512 *, vuc, vuc, const int<4>, const int<4>, const int<2>);
+    PMXVF16GER2NP nothing {mma,quad}
+
+  v512 __builtin_mma_pmxvf16ger2np_internal (v512, vuc, vuc, const int<4>, const int<4>, const int<2>);
+    PMXVF16GER2NP_INTERNAL mma_pmxvf16ger2np {mma,quad}
+
+  void __builtin_mma_pmxvf16ger2pn (v512 *, vuc, vuc, const int<4>, const int<4>, const int<2>);
+    PMXVF16GER2PN nothing {mma,quad}
+
+  v512 __builtin_mma_pmxvf16ger2pn_internal (v512, vuc, vuc, const int<4>, const int<4>, const int<2>);
+    PMXVF16GER2PN_INTERNAL mma_pmxvf16ger2pn {mma,quad}
+
+  void __builtin_mma_pmxvf16ger2pp (v512 *, vuc, vuc, const int<4>, const int<4>, const int<2>);
+    PMXVF16GER2PP nothing {mma,quad}
+
+  v512 __builtin_mma_pmxvf16ger2pp_internal (v512, vuc, vuc, const int<4>, const int<4>, const int<2>);
+    PMXVF16GER2PP_INTERNAL mma_pmxvf16ger2pp {mma,quad}
+
+  void __builtin_mma_pmxvf32ger (v512 *, vuc, vuc, const int<4>, const int<4>);
+    PMXVF32GER nothing {mma}
+
+  v512 __builtin_mma_pmxvf32ger_internal (vuc, vuc, const int<4>, const int<4>);
+    PMXVF32GER_INTERNAL mma_pmxvf32ger {mma}
+
+  void __builtin_mma_pmxvf32gernn (v512 *, vuc, vuc, const int<4>, const int<4>);
+    PMXVF32GERNN nothing {mma,quad}
+
+  v512 __builtin_mma_pmxvf32gernn_internal (v512, vuc, vuc, const int<4>, const int<4>);
+    PMXVF32GERNN_INTERNAL mma_pmxvf32gernn {mma,quad}
+
+  void __builtin_mma_pmxvf32gernp (v512 *, vuc, vuc, const int<4>, const int<4>);
+    PMXVF32GERNP nothing {mma,quad}
+
+  v512 __builtin_mma_pmxvf32gernp_internal (v512, vuc, vuc, const int<4>, const int<4>);
+    PMXVF32GERNP_INTERNAL mma_pmxvf32gernp {mma,quad}
+
+  void __builtin_mma_pmxvf32gerpn (v512 *, vuc, vuc, const int<4>, const int<4>);
+    PMXVF32GERPN nothing {mma,quad}
+
+  v512 __builtin_mma_pmxvf32gerpn_internal (v512, vuc, vuc, const int<4>, const int<4>);
+    PMXVF32GERPN_INTERNAL mma_pmxvf32gerpn {mma,quad}
+
+  void __builtin_mma_pmxvf32gerpp (v512 *, vuc, vuc, const int<4>, const int<4>);
+    PMXVF32GERPP nothing {mma,quad}
+
+  v512 __builtin_mma_pmxvf32gerpp_internal (v512, vuc, vuc, const int<4>, const int<4>);
+    PMXVF32GERPP_INTERNAL mma_pmxvf32gerpp {mma,quad}
+
+  void __builtin_mma_pmxvf64ger (v512 *, v256, vuc, const int<4>, const int<2>);
+    PMXVF64GER nothing {mma,pair}
+
+  v512 __builtin_mma_pmxvf64ger_internal (v256, vuc, const int<4>, const int<2>);
+    PMXVF64GER_INTERNAL mma_pmxvf64ger {mma,pair}
+
+  void __builtin_mma_pmxvf64gernn (v512 *, v256, vuc, const int<4>, const int<2>);
+    PMXVF64GERNN nothing {mma,pair,quad}
+
+  v512 __builtin_mma_pmxvf64gernn_internal (v512, v256, vuc, const int<4>, const int<2>);
+    PMXVF64GERNN_INTERNAL mma_pmxvf64gernn {mma,pair,quad}
+
+  void __builtin_mma_pmxvf64gernp (v512 *, v256, vuc, const int<4>, const int<2>);
+    PMXVF64GERNP nothing {mma,pair,quad}
+
+  v512 __builtin_mma_pmxvf64gernp_internal (v512, v256, vuc, const int<4>, const int<2>);
+    PMXVF64GERNP_INTERNAL mma_pmxvf64gernp {mma,pair,quad}
+
+  void __builtin_mma_pmxvf64gerpn (v512 *, v256, vuc, const int<4>, const int<2>);
+    PMXVF64GERPN nothing {mma,pair,quad}
+
+  v512 __builtin_mma_pmxvf64gerpn_internal (v512, v256, vuc, const int<4>, const int<2>);
+    PMXVF64GERPN_INTERNAL mma_pmxvf64gerpn {mma,pair,quad}
+
+  void __builtin_mma_pmxvf64gerpp (v512 *, v256, vuc, const int<4>, const int<2>);
+    PMXVF64GERPP nothing {mma,pair,quad}
+
+  v512 __builtin_mma_pmxvf64gerpp_internal (v512, v256, vuc, const int<4>, const int<2>);
+    PMXVF64GERPP_INTERNAL mma_pmxvf64gerpp {mma,pair,quad}
+
+  void __builtin_mma_pmxvi16ger2 (v512 *, vuc, vuc, const int<4>, const int<4>, const int<2>);
+    PMXVI16GER2 nothing {mma}
+
+  v512 __builtin_mma_pmxvi16ger2_internal (vuc, vuc, const int<4>, const int<4>, const int<2>);
+    PMXVI16GER2_INTERNAL mma_pmxvi16ger2 {mma}
+
+  void __builtin_mma_pmxvi16ger2pp (v512 *, vuc, vuc, const int<4>, const int<4>, const int<2>);
+    PMXVI16GER2PP nothing {mma,quad}
+
+  v512 __builtin_mma_pmxvi16ger2pp_internal (v512, vuc, vuc, const int<4>, const int<4>, const int<2>);
+    PMXVI16GER2PP_INTERNAL mma_pmxvi16ger2pp {mma,quad}
+
+  void __builtin_mma_pmxvi16ger2s (v512 *, vuc, vuc, const int<4>, const int<4>, const int<2>);
+    PMXVI16GER2S nothing {mma}
+
+  v512 __builtin_mma_pmxvi16ger2s_internal (vuc, vuc, const int<4>, const int<4>, const int<2>);
+    PMXVI16GER2S_INTERNAL mma_pmxvi16ger2s {mma}
+
+  void __builtin_mma_pmxvi16ger2spp (v512 *, vuc, vuc, const int<4>, const int<4>, const int<2>);
+    PMXVI16GER2SPP nothing {mma,quad}
+
+  v512 __builtin_mma_pmxvi16ger2spp_internal (v512, vuc, vuc, const int<4>, const int<4>, const int<2>);
+    PMXVI16GER2SPP_INTERNAL mma_pmxvi16ger2spp {mma,quad}
+
+  void __builtin_mma_pmxvi4ger8 (v512 *, vuc, vuc, const int<4>, const int<4>, const int<8>);
+    PMXVI4GER8 nothing {mma}
+
+  v512 __builtin_mma_pmxvi4ger8_internal (vuc, vuc, const int<4>, const int<4>, const int<8>);
+    PMXVI4GER8_INTERNAL mma_pmxvi4ger8 {mma}
+
+  void __builtin_mma_pmxvi4ger8pp (v512 *, vuc, vuc, const int<4>, const int<4>, const int<4>);
+    PMXVI4GER8PP nothing {mma,quad}
+
+  v512 __builtin_mma_pmxvi4ger8pp_internal (v512, vuc, vuc, const int<4>, const int<4>, const int<4>);
+    PMXVI4GER8PP_INTERNAL mma_pmxvi4ger8pp {mma,quad}
+
+  void __builtin_mma_pmxvi8ger4 (v512 *, vuc, vuc, const int<4>, const int<4>, const int<4>);
+    PMXVI8GER4 nothing {mma}
+
+  v512 __builtin_mma_pmxvi8ger4_internal (vuc, vuc, const int<4>, const int<4>, const int<4>);
+    PMXVI8GER4_INTERNAL mma_pmxvi8ger4 {mma}
+
+  void __builtin_mma_pmxvi8ger4pp (v512 *, vuc, vuc, const int<4>, const int<4>, const int<4>);
+    PMXVI8GER4PP nothing {mma,quad}
+
+  v512 __builtin_mma_pmxvi8ger4pp_internal (v512, vuc, vuc, const int<4>, const int<4>, const int<4>);
+    PMXVI8GER4PP_INTERNAL mma_pmxvi8ger4pp {mma,quad}
+
+  void __builtin_mma_pmxvi8ger4spp (v512 *, vuc, vuc, const int<4>, const int<4>, const int<4>);
+    PMXVI8GER4SPP nothing {mma,quad}
+
+  v512 __builtin_mma_pmxvi8ger4spp_internal (v512, vuc, vuc, const int<4>, const int<4>, const int<4>);
+    PMXVI8GER4SPP_INTERNAL mma_pmxvi8ger4spp {mma,quad}
+
+  void __builtin_mma_xvbf16ger2 (v512 *, vuc, vuc);
+    XVBF16GER2 nothing {mma}
+
+  v512 __builtin_mma_xvbf16ger2_internal (vuc, vuc);
+    XVBF16GER2_INTERNAL mma_xvbf16ger2 {mma}
+
+  void __builtin_mma_xvbf16ger2nn (v512 *, vuc, vuc);
+    XVBF16GER2NN nothing {mma,quad}
+
+  v512 __builtin_mma_xvbf16ger2nn_internal (v512, vuc, vuc);
+    XVBF16GER2NN_INTERNAL mma_xvbf16ger2nn {mma,quad}
+
+  void __builtin_mma_xvbf16ger2np (v512 *, vuc, vuc);
+    XVBF16GER2NP nothing {mma,quad}
+
+  v512 __builtin_mma_xvbf16ger2np_internal (v512, vuc, vuc);
+    XVBF16GER2NP_INTERNAL mma_xvbf16ger2np {mma,quad}
+
+  void __builtin_mma_xvbf16ger2pn (v512 *, vuc, vuc);
+    XVBF16GER2PN nothing {mma,quad}
+
+  v512 __builtin_mma_xvbf16ger2pn_internal (v512, vuc, vuc);
+    XVBF16GER2PN_INTERNAL mma_xvbf16ger2pn {mma,quad}
+
+  void __builtin_mma_xvbf16ger2pp (v512 *, vuc, vuc);
+    XVBF16GER2PP nothing {mma,quad}
+
+  v512 __builtin_mma_xvbf16ger2pp_internal (v512, vuc, vuc);
+    XVBF16GER2PP_INTERNAL mma_xvbf16ger2pp {mma,quad}
+
+  void __builtin_mma_xvf16ger2 (v512 *, vuc, vuc);
+    XVF16GER2 nothing {mma}
+
+  v512 __builtin_mma_xvf16ger2_internal (vuc, vuc);
+    XVF16GER2_INTERNAL mma_xvf16ger2 {mma}
+
+  void __builtin_mma_xvf16ger2nn (v512 *, vuc, vuc);
+    XVF16GER2NN nothing {mma,quad}
+
+  v512 __builtin_mma_xvf16ger2nn_internal (v512, vuc, vuc);
+    XVF16GER2NN_INTERNAL mma_xvf16ger2nn {mma,quad}
+
+  void __builtin_mma_xvf16ger2np (v512 *, vuc, vuc);
+    XVF16GER2NP nothing {mma,quad}
+
+  v512 __builtin_mma_xvf16ger2np_internal (v512, vuc, vuc);
+    XVF16GER2NP_INTERNAL mma_xvf16ger2np {mma,quad}
+
+  void __builtin_mma_xvf16ger2pn (v512 *, vuc, vuc);
+    XVF16GER2PN nothing {mma,quad}
+
+  v512 __builtin_mma_xvf16ger2pn_internal (v512, vuc, vuc);
+    XVF16GER2PN_INTERNAL mma_xvf16ger2pn {mma,quad}
+
+  void __builtin_mma_xvf16ger2pp (v512 *, vuc, vuc);
+    XVF16GER2PP nothing {mma,quad}
+
+  v512 __builtin_mma_xvf16ger2pp_internal (v512, vuc, vuc);
+    XVF16GER2PP_INTERNAL mma_xvf16ger2pp {mma,quad}
+
+  void __builtin_mma_xvf32ger (v512 *, vuc, vuc);
+    XVF32GER nothing {mma}
+
+  v512 __builtin_mma_xvf32ger_internal (vuc, vuc);
+    XVF32GER_INTERNAL mma_xvf32ger {mma}
+
+  void __builtin_mma_xvf32gernn (v512 *, vuc, vuc);
+    XVF32GERNN nothing {mma,quad}
+
+  v512 __builtin_mma_xvf32gernn_internal (v512, vuc, vuc);
+    XVF32GERNN_INTERNAL mma_xvf32gernn {mma,quad}
+
+  void __builtin_mma_xvf32gernp (v512 *, vuc, vuc);
+    XVF32GERNP nothing {mma,quad}
+
+  v512 __builtin_mma_xvf32gernp_internal (v512, vuc, vuc);
+    XVF32GERNP_INTERNAL mma_xvf32gernp {mma,quad}
+
+  void __builtin_mma_xvf32gerpn (v512 *, vuc, vuc);
+    XVF32GERPN nothing {mma,quad}
+
+  v512 __builtin_mma_xvf32gerpn_internal (v512, vuc, vuc);
+    XVF32GERPN_INTERNAL mma_xvf32gerpn {mma,quad}
+
+  void __builtin_mma_xvf32gerpp (v512 *, vuc, vuc);
+    XVF32GERPP nothing {mma,quad}
+
+  v512 __builtin_mma_xvf32gerpp_internal (v512, vuc, vuc);
+    XVF32GERPP_INTERNAL mma_xvf32gerpp {mma,quad}
+
+  void __builtin_mma_xvf64ger (v512 *, v256, vuc);
+    XVF64GER nothing {mma,pair}
+
+  v512 __builtin_mma_xvf64ger_internal (v256, vuc);
+    XVF64GER_INTERNAL mma_xvf64ger {mma,pair}
+
+  void __builtin_mma_xvf64gernn (v512 *, v256, vuc);
+    XVF64GERNN nothing {mma,pair,quad}
+
+  v512 __builtin_mma_xvf64gernn_internal (v512, v256, vuc);
+    XVF64GERNN_INTERNAL mma_xvf64gernn {mma,pair,quad}
+
+  void __builtin_mma_xvf64gernp (v512 *, v256, vuc);
+    XVF64GERNP nothing {mma,pair,quad}
+
+  v512 __builtin_mma_xvf64gernp_internal (v512, v256, vuc);
+    XVF64GERNP_INTERNAL mma_xvf64gernp {mma,pair,quad}
+
+  void __builtin_mma_xvf64gerpn (v512 *, v256, vuc);
+    XVF64GERPN nothing {mma,pair,quad}
+
+  v512 __builtin_mma_xvf64gerpn_internal (v512, v256, vuc);
+    XVF64GERPN_INTERNAL mma_xvf64gerpn {mma,pair,quad}
+
+  void __builtin_mma_xvf64gerpp (v512 *, v256, vuc);
+    XVF64GERPP nothing {mma,pair,quad}
+
+  v512 __builtin_mma_xvf64gerpp_internal (v512, v256, vuc);
+    XVF64GERPP_INTERNAL mma_xvf64gerpp {mma,pair,quad}
+
+  void __builtin_mma_xvi16ger2 (v512 *, vuc, vuc);
+    XVI16GER2 nothing {mma}
+
+  v512 __builtin_mma_xvi16ger2_internal (vuc, vuc);
+    XVI16GER2_INTERNAL mma_xvi16ger2 {mma}
+
+  void __builtin_mma_xvi16ger2pp (v512 *, vuc, vuc);
+    XVI16GER2PP nothing {mma,quad}
+
+  v512 __builtin_mma_xvi16ger2pp_internal (v512, vuc, vuc);
+    XVI16GER2PP_INTERNAL mma_xvi16ger2pp {mma,quad}
+
+  void __builtin_mma_xvi16ger2s (v512 *, vuc, vuc);
+    XVI16GER2S nothing {mma}
+
+  v512 __builtin_mma_xvi16ger2s_internal (vuc, vuc);
+    XVI16GER2S_INTERNAL mma_xvi16ger2s {mma}
+
+  void __builtin_mma_xvi16ger2spp (v512 *, vuc, vuc);
+    XVI16GER2SPP nothing {mma,quad}
+
+  v512 __builtin_mma_xvi16ger2spp_internal (v512, vuc, vuc);
+    XVI16GER2SPP_INTERNAL mma_xvi16ger2spp {mma,quad}
+
+  void __builtin_mma_xvi4ger8 (v512 *, vuc, vuc);
+    XVI4GER8 nothing {mma}
+
+  v512 __builtin_mma_xvi4ger8_internal (vuc, vuc);
+    XVI4GER8_INTERNAL mma_xvi4ger8 {mma}
+
+  void __builtin_mma_xvi4ger8pp (v512 *, vuc, vuc);
+    XVI4GER8PP nothing {mma,quad}
+
+  v512 __builtin_mma_xvi4ger8pp_internal (v512, vuc, vuc);
+    XVI4GER8PP_INTERNAL mma_xvi4ger8pp {mma,quad}
+
+  void __builtin_mma_xvi8ger4 (v512 *, vuc, vuc);
+    XVI8GER4 nothing {mma}
+
+  v512 __builtin_mma_xvi8ger4_internal (vuc, vuc);
+    XVI8GER4_INTERNAL mma_xvi8ger4 {mma}
+
+  void __builtin_mma_xvi8ger4pp (v512 *, vuc, vuc);
+    XVI8GER4PP nothing {mma,quad}
+
+  v512 __builtin_mma_xvi8ger4pp_internal (v512, vuc, vuc);
+    XVI8GER4PP_INTERNAL mma_xvi8ger4pp {mma,quad}
+
+  void __builtin_mma_xvi8ger4spp (v512 *, vuc, vuc);
+    XVI8GER4SPP nothing {mma,quad}
+
+  v512 __builtin_mma_xvi8ger4spp_internal (v512, vuc, vuc);
+    XVI8GER4SPP_INTERNAL mma_xvi8ger4spp {mma,quad}
+
+  void __builtin_mma_xxmfacc (v512 *);
+    XXMFACC nothing {mma,quad}
+
+  v512 __builtin_mma_xxmfacc_internal (v512);
+    XXMFACC_INTERNAL mma_xxmfacc {mma,quad}
+
+  void __builtin_mma_xxmtacc (v512 *);
+    XXMTACC nothing {mma,quad}
+
+  v512 __builtin_mma_xxmtacc_internal (v512);
+    XXMTACC_INTERNAL mma_xxmtacc {mma,quad}
+
+  void __builtin_mma_xxsetaccz (v512 *);
+    XXSETACCZ nothing {mma}
+
+  v512 __builtin_mma_xxsetaccz_internal ();
+    XXSETACCZ_INTERNAL mma_xxsetaccz {mma}
+
+  void __builtin_vsx_assemble_pair (v256 *, vuc, vuc);
+    ASSEMBLE_PAIR_V nothing {mma}
+
+  v256 __builtin_vsx_assemble_pair_internal (vuc, vuc);
+    ASSEMBLE_PAIR_V_INTERNAL vsx_assemble_pair {mma}
+
+  void __builtin_vsx_build_pair (v256 *, vuc, vuc);
+    BUILD_PAIR nothing {mma}
+
+  v256 __builtin_vsx_build_pair_internal (vuc, vuc);
+    BUILD_PAIR_INTERNAL vsx_assemble_pair {mma}
+
+  void __builtin_vsx_disassemble_pair (void *, v256 *);
+    DISASSEMBLE_PAIR_V nothing {mma,pair}
+
+  vuc __builtin_vsx_disassemble_pair_internal (v256, const int<2>);
+    DISASSEMBLE_PAIR_V_INTERNAL vsx_disassemble_pair {mma}
diff --git a/gcc/config/rs6000/rs6000-call.c b/gcc/config/rs6000/rs6000-call.c
index fd7f24d..3a07118 100644
--- a/gcc/config/rs6000/rs6000-call.c
+++ b/gcc/config/rs6000/rs6000-call.c
@@ -14823,6 +14823,11 @@ builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
     case P8V_BUILTIN_ORC_V4SI_UNS:
     case P8V_BUILTIN_ORC_V2DI_UNS:
     case P8V_BUILTIN_ORC_V1TI_UNS:
+    case P10_BUILTIN_CFUGED:
+    case P10_BUILTIN_CNTLZDM:
+    case P10_BUILTIN_CNTTZDM:
+    case P10_BUILTIN_PDEPD:
+    case P10_BUILTIN_PEXTD:
     case P10V_BUILTIN_VCFUGED:
     case P10V_BUILTIN_VCLZDM:
     case P10V_BUILTIN_VCTZDM:
diff --git a/gcc/config/rs6000/rs6000-overload.def b/gcc/config/rs6000/rs6000-overload.def
index d8028c9..141f831 100644 --- a/gcc/config/rs6000/rs6000-overload.def +++ b/gcc/config/rs6000/rs6000-overload.def @@ -75,8 +75,6091 @@ ; a semicolon are also treated as blank lines. +[BCDADD, __builtin_bcdadd, __builtin_vec_bcdadd] + vsq __builtin_vec_bcdadd (vsq, vsq, const int); + BCDADD_V1TI + vuc __builtin_vec_bcdadd (vuc, vuc, const int); + BCDADD_V16QI + +[BCDADD_EQ, __builtin_bcdadd_eq, __builtin_vec_bcdadd_eq] + signed int __builtin_vec_bcdadd_eq (vsq, vsq, const int); + BCDADD_EQ_V1TI + signed int __builtin_vec_bcdadd_eq (vuc, vuc, const int); + BCDADD_EQ_V16QI + +[BCDADD_GT, __builtin_bcdadd_gt, __builtin_vec_bcdadd_gt] + signed int __builtin_vec_bcdadd_gt (vsq, vsq, const int); + BCDADD_GT_V1TI + signed int __builtin_vec_bcdadd_gt (vuc, vuc, const int); + BCDADD_GT_V16QI + +[BCDADD_LT, __builtin_bcdadd_lt, __builtin_vec_bcdadd_lt] + signed int __builtin_vec_bcdadd_lt (vsq, vsq, const int); + BCDADD_LT_V1TI + signed int __builtin_vec_bcdadd_lt (vuc, vuc, const int); + BCDADD_LT_V16QI + +[BCDADD_OV, __builtin_bcdadd_ov, __builtin_vec_bcdadd_ov] + signed int __builtin_vec_bcdadd_ov (vsq, vsq, const int); + BCDADD_OV_V1TI + signed int __builtin_vec_bcdadd_ov (vuc, vuc, const int); + BCDADD_OV_V16QI + +[BCDDIV10, __builtin_bcddiv10, __builtin_vec_bcddiv10] + vuc __builtin_vec_bcddiv10 (vuc); + BCDDIV10_V16QI + +[BCDINVALID, __builtin_bcdinvalid, __builtin_vec_bcdinvalid] + signed int __builtin_vec_bcdinvalid (vsq); + BCDINVALID_V1TI + signed int __builtin_vec_bcdinvalid (vuc); + BCDINVALID_V16QI + +[BCDMUL10, __builtin_bcdmul10, __builtin_vec_bcdmul10] + vuc __builtin_vec_bcdmul10 (vuc); + BCDMUL10_V16QI + +[BCDSUB, __builtin_bcdsub, __builtin_vec_bcdsub] + vsq __builtin_vec_bcdsub (vsq, vsq, const int); + BCDSUB_V1TI + vuc __builtin_vec_bcdsub (vuc, vuc, const int); + BCDSUB_V16QI + +[BCDSUB_EQ, __builtin_bcdsub_eq, __builtin_vec_bcdsub_eq] + signed int __builtin_vec_bcdsub_eq (vsq, vsq, const int); + BCDSUB_EQ_V1TI + signed int __builtin_vec_bcdsub_eq (vuc, vuc, const int); + BCDSUB_EQ_V16QI + +[BCDSUB_GE, __builtin_bcdsub_ge, __builtin_vec_bcdsub_ge] + signed int __builtin_vec_bcdsub_ge (vsq, vsq, const int); + BCDSUB_GE_V1TI + signed int __builtin_vec_bcdsub_ge (vuc, vuc, const int); + BCDSUB_GE_V16QI + +[BCDSUB_GT, __builtin_bcdsub_gt, __builtin_vec_bcdsub_gt] + signed int __builtin_vec_bcdsub_gt (vsq, vsq, const int); + BCDSUB_GT_V1TI + signed int __builtin_vec_bcdsub_gt (vuc, vuc, const int); + BCDSUB_GT_V16QI + +[BCDSUB_LE, __builtin_bcdsub_le, __builtin_vec_bcdsub_le] + signed int __builtin_vec_bcdsub_le (vsq, vsq, const int); + BCDSUB_LE_V1TI + signed int __builtin_vec_bcdsub_le (vuc, vuc, const int); + BCDSUB_LE_V16QI + +[BCDSUB_LT, __builtin_bcdsub_lt, __builtin_vec_bcdsub_lt] + signed int __builtin_vec_bcdsub_lt (vsq, vsq, const int); + BCDSUB_LT_V1TI + signed int __builtin_vec_bcdsub_lt (vuc, vuc, const int); + BCDSUB_LT_V16QI + +[BCDSUB_OV, __builtin_bcdsub_ov, __builtin_vec_bcdsub_ov] + signed int __builtin_vec_bcdsub_ov (vsq, vsq, const int); + BCDSUB_OV_V1TI + signed int __builtin_vec_bcdsub_ov (vuc, vuc, const int); + BCDSUB_OV_V16QI + +[BCD2DFP, __builtin_bcd2dfp, __builtin_vec_denb2dfp] + _Decimal128 __builtin_vec_denb2dfp (vuc); + DENB2DFP_V16QI + +[CRYPTO_PERMXOR, SKIP, __builtin_crypto_vpermxor] + vuc __builtin_crypto_vpermxor (vuc, vuc, vuc); + VPERMXOR_V16QI + vus __builtin_crypto_vpermxor (vus, vus, vus); + VPERMXOR_V8HI + vui __builtin_crypto_vpermxor (vui, vui, vui); + VPERMXOR_V4SI + vull __builtin_crypto_vpermxor (vull, vull, vull); + VPERMXOR_V2DI 
+ +[CRYPTO_PMSUM, SKIP, __builtin_crypto_vpmsum] + vuc __builtin_crypto_vpmsum (vuc, vuc); + VPMSUMB VPMSUMB_C + vus __builtin_crypto_vpmsum (vus, vus); + VPMSUMH VPMSUMH_C + vui __builtin_crypto_vpmsum (vui, vui); + VPMSUMW VPMSUMW_C + vull __builtin_crypto_vpmsum (vull, vull); + VPMSUMD VPMSUMD_C + +[SCAL_CMPB, SKIP, __builtin_cmpb] + unsigned int __builtin_cmpb (unsigned int, unsigned int); + CMPB_32 + unsigned long long __builtin_cmpb (unsigned long long, unsigned long long); + CMPB + [VEC_ABS, vec_abs, __builtin_vec_abs] vsc __builtin_vec_abs (vsc); ABS_V16QI vss __builtin_vec_abs (vss); ABS_V8HI + vsi __builtin_vec_abs (vsi); + ABS_V4SI + vsll __builtin_vec_abs (vsll); + ABS_V2DI + vf __builtin_vec_abs (vf); + ABS_V4SF + vd __builtin_vec_abs (vd); + XVABSDP + +[VEC_ABSD, vec_absd, __builtin_vec_vadu, _ARCH_PWR9] + vuc __builtin_vec_vadu (vuc, vuc); + VADUB + vus __builtin_vec_vadu (vus, vus); + VADUH + vui __builtin_vec_vadu (vui, vui); + VADUW + +[VEC_ABSS, vec_abss, __builtin_vec_abss] + vsc __builtin_vec_abss (vsc); + ABSS_V16QI + vss __builtin_vec_abss (vss); + ABSS_V8HI + vsi __builtin_vec_abss (vsi); + ABSS_V4SI + +[VEC_ADD, vec_add, __builtin_vec_add] + vsc __builtin_vec_add (vsc, vsc); + VADDUBM VADDUBM_VSC + vuc __builtin_vec_add (vuc, vuc); + VADDUBM VADDUBM_VUC + vss __builtin_vec_add (vss, vss); + VADDUHM VADDUHM_VSS + vus __builtin_vec_add (vus, vus); + VADDUHM VADDUHM_VUS + vsi __builtin_vec_add (vsi, vsi); + VADDUWM VADDUWM_VSI + vui __builtin_vec_add (vui, vui); + VADDUWM VADDUWM_VUI + vsll __builtin_vec_add (vsll, vsll); + VADDUDM VADDUDM_VSLL + vull __builtin_vec_add (vull, vull); + VADDUDM VADDUDM_VULL + vsq __builtin_vec_add (vsq, vsq); + VADDUQM VADDUQM_VSQ + vuq __builtin_vec_add (vuq, vuq); + VADDUQM VADDUQM_VUQ + vf __builtin_vec_add (vf, vf); + VADDFP + vd __builtin_vec_add (vd, vd); + XVADDDP +; The following variants are deprecated. + vsc __builtin_vec_add (vbc, vsc); + VADDUBM VADDUBM_VBC_VSC + vsc __builtin_vec_add (vsc, vbc); + VADDUBM VADDUBM_VSC_VBC + vuc __builtin_vec_add (vbc, vuc); + VADDUBM VADDUBM_VBC_VUC + vuc __builtin_vec_add (vuc, vbc); + VADDUBM VADDUBM_VUC_VBC + vss __builtin_vec_add (vbs, vss); + VADDUHM VADDUHM_VBS_VSS + vss __builtin_vec_add (vss, vbs); + VADDUHM VADDUHM_VSS_VBS + vus __builtin_vec_add (vbs, vus); + VADDUHM VADDUHM_VBS_VUS + vus __builtin_vec_add (vus, vbs); + VADDUHM VADDUHM_VUS_VBS + vsi __builtin_vec_add (vbi, vsi); + VADDUWM VADDUWM_VBI_VSI + vsi __builtin_vec_add (vsi, vbi); + VADDUWM VADDUWM_VSI_VBI + vui __builtin_vec_add (vbi, vui); + VADDUWM VADDUWM_VBI_VUI + vui __builtin_vec_add (vui, vbi); + VADDUWM VADDUWM_VUI_VBI + vsll __builtin_vec_add (vbll, vsll); + VADDUDM VADDUDM_VBLL_VSLL + vsll __builtin_vec_add (vsll, vbll); + VADDUDM VADDUDM_VSLL_VBLL + vull __builtin_vec_add (vbll, vull); + VADDUDM VADDUDM_VBLL_VULL + vull __builtin_vec_add (vull, vbll); + VADDUDM VADDUDM_VULL_VBLL + +[VEC_ADDC, vec_addc, __builtin_vec_addc] + vsi __builtin_vec_addc (vsi, vsi); + VADDCUW VADDCUW_VSI + vui __builtin_vec_addc (vui, vui); + VADDCUW VADDCUW_VUI + vsq __builtin_vec_addc (vsq, vsq); + VADDCUQ VADDCUQ_VSQ + vuq __builtin_vec_addc (vuq, vuq); + VADDCUQ VADDCUQ_VUQ + +; TODO: Note that the entry for VEC_ADDE currently gets ignored in +; altivec_resolve_overloaded_builtin. Revisit whether we can remove +; that. We still need to register the legal builtin forms here. 
+[VEC_ADDE, vec_adde, __builtin_vec_adde] + vsq __builtin_vec_adde (vsq, vsq, vsq); + VADDEUQM VADDEUQM_VSQ + vuq __builtin_vec_adde (vuq, vuq, vuq); + VADDEUQM VADDEUQM_VUQ + +; TODO: Note that the entry for VEC_ADDEC currently gets ignored in +; altivec_resolve_overloaded_builtin. Revisit whether we can remove +; that. We still need to register the legal builtin forms here. +[VEC_ADDEC, vec_addec, __builtin_vec_addec] + vsq __builtin_vec_addec (vsq, vsq, vsq); + VADDECUQ VADDECUQ_VSQ + vuq __builtin_vec_addec (vuq, vuq, vuq); + VADDECUQ VADDECUQ_VUQ + +[VEC_ADDS, vec_adds, __builtin_vec_adds] + vuc __builtin_vec_adds (vuc, vuc); + VADDUBS + vsc __builtin_vec_adds (vsc, vsc); + VADDSBS + vus __builtin_vec_adds (vus, vus); + VADDUHS + vss __builtin_vec_adds (vss, vss); + VADDSHS + vui __builtin_vec_adds (vui, vui); + VADDUWS + vsi __builtin_vec_adds (vsi, vsi); + VADDSWS +; The following variants are deprecated. + vuc __builtin_vec_adds (vbc, vuc); + VADDUBS VADDUBS_BU + vuc __builtin_vec_adds (vuc, vbc); + VADDUBS VADDUBS_UB + vsc __builtin_vec_adds (vbc, vsc); + VADDSBS VADDSBS_BS + vsc __builtin_vec_adds (vsc, vbc); + VADDSBS VADDSBS_SB + vus __builtin_vec_adds (vbs, vus); + VADDUHS VADDUHS_BU + vus __builtin_vec_adds (vus, vbs); + VADDUHS VADDUHS_UB + vss __builtin_vec_adds (vbs, vss); + VADDSHS VADDSHS_BS + vss __builtin_vec_adds (vss, vbs); + VADDSHS VADDSHS_SB + vui __builtin_vec_adds (vbi, vui); + VADDUWS VADDUWS_BU + vui __builtin_vec_adds (vui, vbi); + VADDUWS VADDUWS_UB + vsi __builtin_vec_adds (vbi, vsi); + VADDSWS VADDSWS_BS + vsi __builtin_vec_adds (vsi, vbi); + VADDSWS VADDSWS_SB + +[VEC_AND, vec_and, __builtin_vec_and] + vsc __builtin_vec_and (vsc, vsc); + VAND_V16QI + vuc __builtin_vec_and (vuc, vuc); + VAND_V16QI_UNS VAND_VUC + vbc __builtin_vec_and (vbc, vbc); + VAND_V16QI_UNS VAND_VBC + vss __builtin_vec_and (vss, vss); + VAND_V8HI + vus __builtin_vec_and (vus, vus); + VAND_V8HI_UNS VAND_VUS + vbs __builtin_vec_and (vbs, vbs); + VAND_V8HI_UNS VAND_VBS + vsi __builtin_vec_and (vsi, vsi); + VAND_V4SI + vui __builtin_vec_and (vui, vui); + VAND_V4SI_UNS VAND_VUI + vbi __builtin_vec_and (vbi, vbi); + VAND_V4SI_UNS VAND_VBI + vsll __builtin_vec_and (vsll, vsll); + VAND_V2DI + vull __builtin_vec_and (vull, vull); + VAND_V2DI_UNS VAND_VULL + vbll __builtin_vec_and (vbll, vbll); + VAND_V2DI_UNS VAND_VBLL + vf __builtin_vec_and (vf, vf); + VAND_V4SF + vd __builtin_vec_and (vd, vd); + VAND_V2DF +; The following variants are deprecated. 
+ vsc __builtin_vec_and (vsc, vbc); + VAND_V16QI VAND_VSC_VBC + vsc __builtin_vec_and (vbc, vsc); + VAND_V16QI VAND_VBC_VSC + vuc __builtin_vec_and (vuc, vbc); + VAND_V16QI_UNS VAND_VUC_VBC + vuc __builtin_vec_and (vbc, vuc); + VAND_V16QI_UNS VAND_VBC_VUC + vss __builtin_vec_and (vss, vbs); + VAND_V8HI VAND_VSS_VBS + vss __builtin_vec_and (vbs, vss); + VAND_V8HI VAND_VBS_VSS + vus __builtin_vec_and (vus, vbs); + VAND_V8HI_UNS VAND_VUS_VBS + vus __builtin_vec_and (vbs, vus); + VAND_V8HI_UNS VAND_VBS_VUS + vsi __builtin_vec_and (vsi, vbi); + VAND_V4SI VAND_VSI_VBI + vsi __builtin_vec_and (vbi, vsi); + VAND_V4SI VAND_VBI_VSI + vui __builtin_vec_and (vui, vbi); + VAND_V4SI_UNS VAND_VUI_VBI + vui __builtin_vec_and (vbi, vui); + VAND_V4SI_UNS VAND_VBI_VUI + vsll __builtin_vec_and (vsll, vbll); + VAND_V2DI VAND_VSLL_VBLL + vsll __builtin_vec_and (vbll, vsll); + VAND_V2DI VAND_VBLL_VSLL + vull __builtin_vec_and (vull, vbll); + VAND_V2DI_UNS VAND_VULL_VBLL + vull __builtin_vec_and (vbll, vull); + VAND_V2DI_UNS VAND_VBLL_VULL + vf __builtin_vec_and (vf, vbi); + VAND_V4SF VAND_VF_VBI + vf __builtin_vec_and (vbi, vf); + VAND_V4SF VAND_VBI_VF + vd __builtin_vec_and (vd, vbll); + VAND_V2DF VAND_VD_VBLL + vd __builtin_vec_and (vbll, vd); + VAND_V2DF VAND_VBLL_VD + +[VEC_ANDC, vec_andc, __builtin_vec_andc] + vbc __builtin_vec_andc (vbc, vbc); + VANDC_V16QI_UNS VANDC_VBC + vsc __builtin_vec_andc (vsc, vsc); + VANDC_V16QI + vuc __builtin_vec_andc (vuc, vuc); + VANDC_V16QI_UNS VANDC_VUC + vbs __builtin_vec_andc (vbs, vbs); + VANDC_V8HI_UNS VANDC_VBS + vss __builtin_vec_andc (vss, vss); + VANDC_V8HI + vus __builtin_vec_andc (vus, vus); + VANDC_V8HI_UNS VANDC_VUS + vbi __builtin_vec_andc (vbi, vbi); + VANDC_V4SI_UNS VANDC_VBI + vsi __builtin_vec_andc (vsi, vsi); + VANDC_V4SI + vui __builtin_vec_andc (vui, vui); + VANDC_V4SI_UNS VANDC_VUI + vbll __builtin_vec_andc (vbll, vbll); + VANDC_V2DI_UNS VANDC_VBLL + vsll __builtin_vec_andc (vsll, vsll); + VANDC_V2DI + vull __builtin_vec_andc (vull, vull); + VANDC_V2DI_UNS VANDC_VULL + vf __builtin_vec_andc (vf, vf); + VANDC_V4SF + vd __builtin_vec_andc (vd, vd); + VANDC_V2DF +; The following variants are deprecated. 
+ vsc __builtin_vec_andc (vsc, vbc); + VANDC_V16QI VANDC_VSC_VBC + vsc __builtin_vec_andc (vbc, vsc); + VANDC_V16QI VANDC_VBC_VSC + vuc __builtin_vec_andc (vuc, vbc); + VANDC_V16QI_UNS VANDC_VUC_VBC + vuc __builtin_vec_andc (vbc, vuc); + VANDC_V16QI_UNS VANDC_VBC_VUC + vss __builtin_vec_andc (vss, vbs); + VANDC_V8HI VANDC_VSS_VBS + vss __builtin_vec_andc (vbs, vss); + VANDC_V8HI VANDC_VBS_VSS + vus __builtin_vec_andc (vus, vbs); + VANDC_V8HI_UNS VANDC_VUS_VBS + vus __builtin_vec_andc (vbs, vus); + VANDC_V8HI_UNS VANDC_VBS_VUS + vsi __builtin_vec_andc (vsi, vbi); + VANDC_V4SI VANDC_VSI_VBI + vsi __builtin_vec_andc (vbi, vsi); + VANDC_V4SI VANDC_VBI_VSI + vui __builtin_vec_andc (vui, vbi); + VANDC_V4SI_UNS VANDC_VUI_VBI + vui __builtin_vec_andc (vbi, vui); + VANDC_V4SI_UNS VANDC_VBI_VUI + vsll __builtin_vec_andc (vsll, vbll); + VANDC_V2DI VANDC_VSLL_VBLL + vsll __builtin_vec_andc (vbll, vsll); + VANDC_V2DI VANDC_VBLL_VSLL + vull __builtin_vec_andc (vull, vbll); + VANDC_V2DI_UNS VANDC_VULL_VBLL + vull __builtin_vec_andc (vbll, vull); + VANDC_V2DI_UNS VANDC_VBLL_VULL + vf __builtin_vec_andc (vf, vbi); + VANDC_V4SF VANDC_VF_VBI + vf __builtin_vec_andc (vbi, vf); + VANDC_V4SF VANDC_VBI_VF + vd __builtin_vec_andc (vd, vbll); + VANDC_V2DF VANDC_VD_VBLL + vd __builtin_vec_andc (vbll, vd); + VANDC_V2DF VANDC_VBLL_VD + +[VEC_AVG, vec_avg, __builtin_vec_avg] + vsc __builtin_vec_avg (vsc, vsc); + VAVGSB + vuc __builtin_vec_avg (vuc, vuc); + VAVGUB + vss __builtin_vec_avg (vss, vss); + VAVGSH + vus __builtin_vec_avg (vus, vus); + VAVGUH + vsi __builtin_vec_avg (vsi, vsi); + VAVGSW + vui __builtin_vec_avg (vui, vui); + VAVGUW + +[VEC_BLENDV, vec_blendv, __builtin_vec_xxblend, _ARCH_PWR10] + vsc __builtin_vec_xxblend (vsc, vsc, vuc); + VXXBLEND_V16QI VXXBLEND_VSC + vuc __builtin_vec_xxblend (vuc, vuc, vuc); + VXXBLEND_V16QI VXXBLEND_VUC + vss __builtin_vec_xxblend (vss, vss, vus); + VXXBLEND_V8HI VXXBLEND_VSS + vus __builtin_vec_xxblend (vus, vus, vus); + VXXBLEND_V8HI VXXBLEND_VUS + vsi __builtin_vec_xxblend (vsi, vsi, vui); + VXXBLEND_V4SI VXXBLEND_VSI + vui __builtin_vec_xxblend (vui, vui, vui); + VXXBLEND_V4SI VXXBLEND_VUI + vsll __builtin_vec_xxblend (vsll, vsll, vull); + VXXBLEND_V2DI VXXBLEND_VSLL + vull __builtin_vec_xxblend (vull, vull, vull); + VXXBLEND_V2DI VXXBLEND_VULL + vf __builtin_vec_xxblend (vf, vf, vui); + VXXBLEND_V4SF + vd __builtin_vec_xxblend (vd, vd, vull); + VXXBLEND_V2DF + +[VEC_BPERM, vec_bperm, __builtin_vec_vbperm_api, _ARCH_PWR8] + vull __builtin_vec_vbperm_api (vull, vuc); + VBPERMD VBPERMD_VULL + vull __builtin_vec_vbperm_api (vuq, vuc); + VBPERMQ VBPERMQ_VUQ + vuc __builtin_vec_vbperm_api (vuc, vuc); + VBPERMQ2 VBPERMQ2_U + vsc __builtin_vec_vbperm_api (vsc, vsc); + VBPERMQ2 VBPERMQ2_S + +[VEC_CEIL, vec_ceil, __builtin_vec_ceil] + vf __builtin_vec_ceil (vf); + VRFIP + vd __builtin_vec_ceil (vd); + XVRDPIP + +[VEC_CFUGE, vec_cfuge, __builtin_vec_cfuge, _ARCH_PWR10] + vull __builtin_vec_cfuge (vull, vull); + VCFUGED + +[VEC_CIPHER_BE, vec_cipher_be, __builtin_vec_vcipher_be, _ARCH_PWR8] + vuc __builtin_vec_vcipher_be (vuc, vuc); + VCIPHER_BE + +[VEC_CIPHERLAST_BE, vec_cipherlast_be, __builtin_vec_vcipherlast_be, _ARCH_PWR8] + vuc __builtin_vec_vcipherlast_be (vuc, vuc); + VCIPHERLAST_BE + +[VEC_CLRL, vec_clrl, __builtin_vec_clrl, _ARCH_PWR10] + vsc __builtin_vec_clrl (vsc, unsigned int); + VCLRLB VCLRLB_S + vuc __builtin_vec_clrl (vuc, unsigned int); + VCLRLB VCLRLB_U + +[VEC_CLRR, vec_clrr, __builtin_vec_clrr, _ARCH_PWR10] + vsc __builtin_vec_clrr (vsc, unsigned int); + 
VCLRRB VCLRRB_S + vuc __builtin_vec_clrr (vuc, unsigned int); + VCLRRB VCLRRB_U + +; We skip generating a #define because of the C-versus-C++ complexity +; in altivec.h. Look there for the template-y details. +[VEC_CMPAE_P, SKIP, __builtin_vec_vcmpae_p] + signed int __builtin_vec_vcmpae_p (vsc, vsc); + VCMPAEB_P VCMPAEB_VSC_P + signed int __builtin_vec_vcmpae_p (vuc, vuc); + VCMPAEB_P VCMPAEB_VUC_P + signed int __builtin_vec_vcmpae_p (vbc, vbc); + VCMPAEB_P VCMPAEB_VBC_P + signed int __builtin_vec_vcmpae_p (vss, vss); + VCMPAEH_P VCMPAEH_VSS_P + signed int __builtin_vec_vcmpae_p (vus, vus); + VCMPAEH_P VCMPAEH_VUS_P + signed int __builtin_vec_vcmpae_p (vbs, vbs); + VCMPAEH_P VCMPAEH_VBS_P + signed int __builtin_vec_vcmpae_p (vp, vp); + VCMPAEH_P VCMPAEH_VP_P + signed int __builtin_vec_vcmpae_p (vsi, vsi); + VCMPAEW_P VCMPAEW_VSI_P + signed int __builtin_vec_vcmpae_p (vui, vui); + VCMPAEW_P VCMPAEW_VUI_P + signed int __builtin_vec_vcmpae_p (vbi, vbi); + VCMPAEW_P VCMPAEW_VBI_P + signed int __builtin_vec_vcmpae_p (vsll, vsll); + VCMPAED_P VCMPAED_VSLL_P + signed int __builtin_vec_vcmpae_p (vull, vull); + VCMPAED_P VCMPAED_VULL_P + signed int __builtin_vec_vcmpae_p (vbll, vbll); + VCMPAED_P VCMPAED_VBLL_P + signed int __builtin_vec_vcmpae_p (vsq, vsq); + VCMPAET_P VCMPAET_VSQ_P + signed int __builtin_vec_vcmpae_p (vuq, vuq); + VCMPAET_P VCMPAET_VUQ_P + signed int __builtin_vec_vcmpae_p (vf, vf); + VCMPAEFP_P + signed int __builtin_vec_vcmpae_p (vd, vd); + VCMPAEDP_P +; The following variants are deprecated. + signed int __builtin_vec_vcmpae_p (signed int, vbc, vuc); + VCMPAEB_P VCMPAEB_P_BU + signed int __builtin_vec_vcmpae_p (signed int, vuc, vbc); + VCMPAEB_P VCMPAEB_P_UB + signed int __builtin_vec_vcmpae_p (signed int, vbc, vsc); + VCMPAEB_P VCMPAEB_P_BS + signed int __builtin_vec_vcmpae_p (signed int, vsc, vbc); + VCMPAEB_P VCMPAEB_P_SB + signed int __builtin_vec_vcmpae_p (signed int, vbs, vus); + VCMPAEH_P VCMPAEH_P_BU + signed int __builtin_vec_vcmpae_p (signed int, vus, vbs); + VCMPAEH_P VCMPAEH_P_UB + signed int __builtin_vec_vcmpae_p (signed int, vbs, vss); + VCMPAEH_P VCMPAEH_P_BS + signed int __builtin_vec_vcmpae_p (signed int, vss, vbs); + VCMPAEH_P VCMPAEH_P_SB + signed int __builtin_vec_vcmpae_p (signed int, vbi, vui); + VCMPAEW_P VCMPAEW_P_BU + signed int __builtin_vec_vcmpae_p (signed int, vui, vbi); + VCMPAEW_P VCMPAEW_P_UB + signed int __builtin_vec_vcmpae_p (signed int, vbi, vsi); + VCMPAEW_P VCMPAEW_P_BS + signed int __builtin_vec_vcmpae_p (signed int, vsi, vbi); + VCMPAEW_P VCMPAEW_P_SB + signed int __builtin_vec_vcmpae_p (signed int, vbll, vull); + VCMPAED_P VCMPAED_P_BU + signed int __builtin_vec_vcmpae_p (signed int, vull, vbll); + VCMPAED_P VCMPAED_P_UB + signed int __builtin_vec_vcmpae_p (signed int, vbll, vsll); + VCMPAED_P VCMPAED_P_BS + signed int __builtin_vec_vcmpae_p (signed int, vbll, vsll); + VCMPAED_P VCMPAED_P_SB + +[VEC_CMPB, vec_cmpb, __builtin_vec_cmpb] + vsi __builtin_vec_cmpb (vf, vf); + VCMPBFP + +[VEC_CMPEQ, vec_cmpeq, __builtin_vec_cmpeq] + vbc __builtin_vec_cmpeq (vsc, vsc); + VCMPEQUB VCMPEQUB_VSC + vbc __builtin_vec_cmpeq (vuc, vuc); + VCMPEQUB VCMPEQUB_VUC + vbc __builtin_vec_cmpeq (vbc, vbc); + VCMPEQUB VCMPEQUB_VBC + vbs __builtin_vec_cmpeq (vss, vss); + VCMPEQUH VCMPEQUH_VSS + vbs __builtin_vec_cmpeq (vus, vus); + VCMPEQUH VCMPEQUH_VUS + vbs __builtin_vec_cmpeq (vbs, vbs); + VCMPEQUH VCMPEQUH_VBS + vbi __builtin_vec_cmpeq (vsi, vsi); + VCMPEQUW VCMPEQUW_VSI + vbi __builtin_vec_cmpeq (vui, vui); + VCMPEQUW VCMPEQUW_VUI + vbi 
__builtin_vec_cmpeq (vbi, vbi); + VCMPEQUW VCMPEQUW_VBI + vbll __builtin_vec_cmpeq (vsll, vsll); + VCMPEQUD VCMPEQUD_VSLL + vbll __builtin_vec_cmpeq (vull, vull); + VCMPEQUD VCMPEQUD_VULL + vbll __builtin_vec_cmpeq (vbll, vbll); + VCMPEQUD VCMPEQUD_VBLL + vbq __builtin_vec_cmpeq (vsq, vsq); + VCMPEQUT VCMPEQUT_VSQ + vbq __builtin_vec_cmpeq (vuq, vuq); + VCMPEQUT VCMPEQUT_VUQ + vbi __builtin_vec_cmpeq (vf, vf); + VCMPEQFP + vbll __builtin_vec_cmpeq (vd, vd); + XVCMPEQDP + +; We skip generating a #define because of the C-versus-C++ complexity +; in altivec.h. Look there for the template-y details. +[VEC_CMPEQ_P, SKIP, __builtin_vec_vcmpeq_p] + signed int __builtin_vec_vcmpeq_p (signed int, vuc, vuc); + VCMPEQUB_P VCMPEQUB_PU + signed int __builtin_vec_vcmpeq_p (signed int, vsc, vsc); + VCMPEQUB_P VCMPEQUB_PS + signed int __builtin_vec_vcmpeq_p (signed int, vbc, vbc); + VCMPEQUB_P VCMPEQUB_PB + signed int __builtin_vec_vcmpeq_p (signed int, vus, vus); + VCMPEQUH_P VCMPEQUH_PU + signed int __builtin_vec_vcmpeq_p (signed int, vss, vss); + VCMPEQUH_P VCMPEQUH_PS + signed int __builtin_vec_vcmpeq_p (signed int, vbs, vbs); + VCMPEQUH_P VCMPEQUH_PB + signed int __builtin_vec_vcmpeq_p (signed int, vp, vp); + VCMPEQUH_P VCMPEQUH_PP + signed int __builtin_vec_vcmpeq_p (signed int, vui, vui); + VCMPEQUW_P VCMPEQUW_PU + signed int __builtin_vec_vcmpeq_p (signed int, vsi, vsi); + VCMPEQUW_P VCMPEQUW_PS + signed int __builtin_vec_vcmpeq_p (signed int, vbi, vbi); + VCMPEQUW_P VCMPEQUW_PB + signed int __builtin_vec_vcmpeq_p (signed int, vull, vull); + VCMPEQUD_P VCMPEQUD_PU + signed int __builtin_vec_vcmpeq_p (signed int, vsll, vsll); + VCMPEQUD_P VCMPEQUD_PS + signed int __builtin_vec_vcmpeq_p (signed int, vbll, vbll); + VCMPEQUD_P VCMPEQUD_PB + signed int __builtin_vec_vcmpeq_p (signed int, vsq, vsq); + VCMPEQUT_P VCMPEQUT_P_VSQ + signed int __builtin_vec_vcmpeq_p (signed int, vuq, vuq); + VCMPEQUT_P VCMPEQUT_P_VUQ + signed int __builtin_vec_vcmpeq_p (signed int, vf, vf); + VCMPEQFP_P + signed int __builtin_vec_vcmpeq_p (signed int, vd, vd); + XVCMPEQDP_P +; The following variants are deprecated. 
+
+[VEC_CMPEQB, SKIP, __builtin_byte_in_set]
+  signed int __builtin_byte_in_set (unsigned int, unsigned long long);
+    CMPEQB
+
+[VEC_CMPGE, vec_cmpge, __builtin_vec_cmpge]
+  vbc __builtin_vec_cmpge (vsc, vsc);
+    CMPGE_16QI  CMPGE_16QI_VSC
+  vbc __builtin_vec_cmpge (vuc, vuc);
+    CMPGE_U16QI  CMPGE_16QI_VUC
+  vbs __builtin_vec_cmpge (vss, vss);
+    CMPGE_8HI  CMPGE_8HI_VSS
+  vbs __builtin_vec_cmpge (vus, vus);
+    CMPGE_U8HI  CMPGE_8HI_VUS
+  vbi __builtin_vec_cmpge (vsi, vsi);
+    CMPGE_4SI  CMPGE_4SI_VSI
+  vbi __builtin_vec_cmpge (vui, vui);
+    CMPGE_U4SI  CMPGE_4SI_VUI
+  vbll __builtin_vec_cmpge (vsll, vsll);
+    CMPGE_2DI  CMPGE_2DI_VSLL
+  vbll __builtin_vec_cmpge (vull, vull);
+    CMPGE_U2DI  CMPGE_2DI_VULL
+  vbq __builtin_vec_cmpge (vsq, vsq);
+    CMPGE_1TI
+  vbq __builtin_vec_cmpge (vuq, vuq);
+    CMPGE_U1TI
+  vbi __builtin_vec_cmpge (vf, vf);
+    VCMPGEFP
+  vbll __builtin_vec_cmpge (vd, vd);
+    XVCMPGEDP
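+
+; Usage sketch (illustrative types and values, not part of this table):
+; the compare builtins return a boolean vector of matching width, which
+; is typically fed to vec_sel, e.g. to clamp negative lanes to zero:
+;
+;   vector float x = {-1.0f, 2.0f, -3.0f, 4.0f};
+;   vector float zero = vec_splats (0.0f);
+;   vector bool int ge = vec_cmpge (x, zero);
+;   vector float clamped = vec_sel (zero, x, ge);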
+
+; We skip generating a #define because of the C-versus-C++ complexity
+; in altivec.h.  Look there for the template-y details.
+; See altivec_build_resolved_builtin for how we deal with VEC_CMPGE_P.
+; It's quite strange and horrible!
+[VEC_CMPGE_P, SKIP, __builtin_vec_vcmpge_p]
+  signed int __builtin_vec_vcmpge_p (signed int, vuc, vuc);
+    VCMPGTUB_P  VCMPGTUB_PR
+  signed int __builtin_vec_vcmpge_p (signed int, vsc, vsc);
+    VCMPGTSB_P  VCMPGTSB_PR
+  signed int __builtin_vec_vcmpge_p (signed int, vus, vus);
+    VCMPGTUH_P  VCMPGTUH_PR
+  signed int __builtin_vec_vcmpge_p (signed int, vss, vss);
+    VCMPGTSH_P  VCMPGTSH_PR
+  signed int __builtin_vec_vcmpge_p (signed int, vui, vui);
+    VCMPGTUW_P  VCMPGTUW_PR
+  signed int __builtin_vec_vcmpge_p (signed int, vsi, vsi);
+    VCMPGTSW_P  VCMPGTSW_PR
+  signed int __builtin_vec_vcmpge_p (signed int, vull, vull);
+    VCMPGTUD_P  VCMPGTUD_PR
+  signed int __builtin_vec_vcmpge_p (signed int, vsll, vsll);
+    VCMPGTSD_P  VCMPGTSD_PR
+  signed int __builtin_vec_vcmpge_p (signed int, vuq, vuq);
+    VCMPGTUT_P  VCMPGTUT_PR
+  signed int __builtin_vec_vcmpge_p (signed int, vsq, vsq);
+    VCMPGTST_P  VCMPGTST_PR
+  signed int __builtin_vec_vcmpge_p (signed int, vf, vf);
+    VCMPGEFP_P
+  signed int __builtin_vec_vcmpge_p (signed int, vd, vd);
+    XVCMPGEDP_P
+; The following variants are deprecated.
+  signed int __builtin_vec_vcmpge_p (signed int, vbc, vuc);
+    VCMPGTUB_P  VCMPGTUB_PR_BU
+  signed int __builtin_vec_vcmpge_p (signed int, vuc, vbc);
+    VCMPGTUB_P  VCMPGTUB_PR_UB
+  signed int __builtin_vec_vcmpge_p (signed int, vbc, vsc);
+    VCMPGTSB_P  VCMPGTSB_PR_BS
+  signed int __builtin_vec_vcmpge_p (signed int, vsc, vbc);
+    VCMPGTSB_P  VCMPGTSB_PR_SB
+  signed int __builtin_vec_vcmpge_p (signed int, vbs, vus);
+    VCMPGTUH_P  VCMPGTUH_PR_BU
+  signed int __builtin_vec_vcmpge_p (signed int, vus, vbs);
+    VCMPGTUH_P  VCMPGTUH_PR_UB
+  signed int __builtin_vec_vcmpge_p (signed int, vbs, vss);
+    VCMPGTSH_P  VCMPGTSH_PR_BS
+  signed int __builtin_vec_vcmpge_p (signed int, vss, vbs);
+    VCMPGTSH_P  VCMPGTSH_PR_SB
+  signed int __builtin_vec_vcmpge_p (signed int, vbi, vui);
+    VCMPGTUW_P  VCMPGTUW_PR_BU
+  signed int __builtin_vec_vcmpge_p (signed int, vui, vbi);
+    VCMPGTUW_P  VCMPGTUW_PR_UB
+  signed int __builtin_vec_vcmpge_p (signed int, vbi, vsi);
+    VCMPGTSW_P  VCMPGTSW_PR_BS
+  signed int __builtin_vec_vcmpge_p (signed int, vsi, vbi);
+    VCMPGTSW_P  VCMPGTSW_PR_SB
+  signed int __builtin_vec_vcmpge_p (signed int, vbll, vull);
+    VCMPGTUD_P  VCMPGTUD_PR_BU
+  signed int __builtin_vec_vcmpge_p (signed int, vull, vbll);
+    VCMPGTUD_P  VCMPGTUD_PR_UB
+  signed int __builtin_vec_vcmpge_p (signed int, vbll, vsll);
+    VCMPGTSD_P  VCMPGTSD_PR_BS
+  signed int __builtin_vec_vcmpge_p (signed int, vsll, vbll);
+    VCMPGTSD_P  VCMPGTSD_PR_SB
+
+[VEC_CMPGT, vec_cmpgt, __builtin_vec_cmpgt]
+  vbc __builtin_vec_cmpgt (vsc, vsc);
+    VCMPGTSB
+  vbc __builtin_vec_cmpgt (vuc, vuc);
+    VCMPGTUB
+  vbs __builtin_vec_cmpgt (vss, vss);
+    VCMPGTSH
+  vbs __builtin_vec_cmpgt (vus, vus);
+    VCMPGTUH
+  vbi __builtin_vec_cmpgt (vsi, vsi);
+    VCMPGTSW
+  vbi __builtin_vec_cmpgt (vui, vui);
+    VCMPGTUW
+  vbll __builtin_vec_cmpgt (vsll, vsll);
+    VCMPGTSD
+  vbll __builtin_vec_cmpgt (vull, vull);
+    VCMPGTUD
+  vbq __builtin_vec_cmpgt (vsq, vsq);
+    VCMPGTST
+  vbq __builtin_vec_cmpgt (vuq, vuq);
+    VCMPGTUT
+  vbi __builtin_vec_cmpgt (vf, vf);
+    VCMPGTFP
+  vbll __builtin_vec_cmpgt (vd, vd);
+    XVCMPGTDP
+
+; We skip generating a #define because of the C-versus-C++ complexity
+; in altivec.h.  Look there for the template-y details.
+[VEC_CMPGT_P, SKIP, __builtin_vec_vcmpgt_p]
+  signed int __builtin_vec_vcmpgt_p (signed int, vuc, vuc);
+    VCMPGTUB_P
+  signed int __builtin_vec_vcmpgt_p (signed int, vsc, vsc);
+    VCMPGTSB_P
+  signed int __builtin_vec_vcmpgt_p (signed int, vus, vus);
+    VCMPGTUH_P
+  signed int __builtin_vec_vcmpgt_p (signed int, vss, vss);
+    VCMPGTSH_P
+  signed int __builtin_vec_vcmpgt_p (signed int, vui, vui);
+    VCMPGTUW_P
+  signed int __builtin_vec_vcmpgt_p (signed int, vsi, vsi);
+    VCMPGTSW_P
+  signed int __builtin_vec_vcmpgt_p (signed int, vull, vull);
+    VCMPGTUD_P
+  signed int __builtin_vec_vcmpgt_p (signed int, vsll, vsll);
+    VCMPGTSD_P
+  signed int __builtin_vec_vcmpgt_p (signed int, vuq, vuq);
+    VCMPGTUT_P
+  signed int __builtin_vec_vcmpgt_p (signed int, vsq, vsq);
+    VCMPGTST_P
+  signed int __builtin_vec_vcmpgt_p (signed int, vf, vf);
+    VCMPGTFP_P
+  signed int __builtin_vec_vcmpgt_p (signed int, vd, vd);
+    XVCMPGTDP_P
+; The following variants are deprecated.
+  signed int __builtin_vec_vcmpgt_p (signed int, vbc, vuc);
+    VCMPGTUB_P  VCMPGTUB_P_BU
+  signed int __builtin_vec_vcmpgt_p (signed int, vuc, vbc);
+    VCMPGTUB_P  VCMPGTUB_P_UB
+  signed int __builtin_vec_vcmpgt_p (signed int, vbc, vsc);
+    VCMPGTSB_P  VCMPGTSB_P_BS
+  signed int __builtin_vec_vcmpgt_p (signed int, vsc, vbc);
+    VCMPGTSB_P  VCMPGTSB_P_SB
+  signed int __builtin_vec_vcmpgt_p (signed int, vbs, vus);
+    VCMPGTUH_P  VCMPGTUH_P_BU
+  signed int __builtin_vec_vcmpgt_p (signed int, vus, vbs);
+    VCMPGTUH_P  VCMPGTUH_P_UB
+  signed int __builtin_vec_vcmpgt_p (signed int, vbs, vss);
+    VCMPGTSH_P  VCMPGTSH_P_BS
+  signed int __builtin_vec_vcmpgt_p (signed int, vss, vbs);
+    VCMPGTSH_P  VCMPGTSH_P_SB
+  signed int __builtin_vec_vcmpgt_p (signed int, vbi, vui);
+    VCMPGTUW_P  VCMPGTUW_P_BU
+  signed int __builtin_vec_vcmpgt_p (signed int, vui, vbi);
+    VCMPGTUW_P  VCMPGTUW_P_UB
+  signed int __builtin_vec_vcmpgt_p (signed int, vbi, vsi);
+    VCMPGTSW_P  VCMPGTSW_P_BS
+  signed int __builtin_vec_vcmpgt_p (signed int, vsi, vbi);
+    VCMPGTSW_P  VCMPGTSW_P_SB
+  signed int __builtin_vec_vcmpgt_p (signed int, vbll, vull);
+    VCMPGTUD_P  VCMPGTUD_P_BU
+  signed int __builtin_vec_vcmpgt_p (signed int, vull, vbll);
+    VCMPGTUD_P  VCMPGTUD_P_UB
+  signed int __builtin_vec_vcmpgt_p (signed int, vbll, vsll);
+    VCMPGTSD_P  VCMPGTSD_P_BS
+  signed int __builtin_vec_vcmpgt_p (signed int, vsll, vbll);
+    VCMPGTSD_P  VCMPGTSD_P_SB
+
+; Note that there is no entry for VEC_CMPLE.  VEC_CMPLE is implemented
+; using VEC_CMPGE with reversed arguments in altivec.h.
+
+; Note that there is no entry for VEC_CMPLT.  VEC_CMPLT is implemented
+; using VEC_CMPGT with reversed arguments in altivec.h.
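+
+; As the two notes above say, the missing entries reduce to operand
+; swaps; an illustrative sketch (any comparable vector type works the
+; same way):
+;
+;   vector signed int a, b;
+;   vector bool int le = vec_cmple (a, b);   /* same as vec_cmpge (b, a) */
+;   vector bool int lt = vec_cmplt (a, b);   /* same as vec_cmpgt (b, a) */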
+
+[VEC_CMPNE, vec_cmpne, __builtin_vec_cmpne]
+  vbc __builtin_vec_cmpne (vbc, vbc);
+    VCMPNEB  VCMPNEB_VBC
+  vbc __builtin_vec_cmpne (vsc, vsc);
+    VCMPNEB  VCMPNEB_VSC
+  vbc __builtin_vec_cmpne (vuc, vuc);
+    VCMPNEB  VCMPNEB_VUC
+  vbs __builtin_vec_cmpne (vbs, vbs);
+    VCMPNEH  VCMPNEH_VBS
+  vbs __builtin_vec_cmpne (vss, vss);
+    VCMPNEH  VCMPNEH_VSS
+  vbs __builtin_vec_cmpne (vus, vus);
+    VCMPNEH  VCMPNEH_VUS
+  vbi __builtin_vec_cmpne (vbi, vbi);
+    VCMPNEW  VCMPNEW_VBI
+  vbi __builtin_vec_cmpne (vsi, vsi);
+    VCMPNEW  VCMPNEW_VSI
+  vbi __builtin_vec_cmpne (vui, vui);
+    VCMPNEW  VCMPNEW_VUI
+  vbq __builtin_vec_cmpne (vsq, vsq);
+    VCMPNET  VCMPNET_VSQ
+  vbq __builtin_vec_cmpne (vuq, vuq);
+    VCMPNET  VCMPNET_VUQ
+
+; We skip generating a #define because of the C-versus-C++ complexity
+; in altivec.h.  Look there for the template-y details.
+[VEC_CMPNE_P, SKIP, __builtin_vec_vcmpne_p]
+  signed int __builtin_vec_vcmpne_p (vsc, vsc);
+    VCMPNEB_P  VCMPNEB_VSC_P
+  signed int __builtin_vec_vcmpne_p (vuc, vuc);
+    VCMPNEB_P  VCMPNEB_VUC_P
+  signed int __builtin_vec_vcmpne_p (vbc, vbc);
+    VCMPNEB_P  VCMPNEB_VBC_P
+  signed int __builtin_vec_vcmpne_p (vss, vss);
+    VCMPNEH_P  VCMPNEH_VSS_P
+  signed int __builtin_vec_vcmpne_p (vus, vus);
+    VCMPNEH_P  VCMPNEH_VUS_P
+  signed int __builtin_vec_vcmpne_p (vbs, vbs);
+    VCMPNEH_P  VCMPNEH_VBS_P
+  signed int __builtin_vec_vcmpne_p (vp, vp);
+    VCMPNEH_P  VCMPNEH_VP_P
+  signed int __builtin_vec_vcmpne_p (vsi, vsi);
+    VCMPNEW_P  VCMPNEW_VSI_P
+  signed int __builtin_vec_vcmpne_p (vui, vui);
+    VCMPNEW_P  VCMPNEW_VUI_P
+  signed int __builtin_vec_vcmpne_p (vbi, vbi);
+    VCMPNEW_P  VCMPNEW_VBI_P
+  signed int __builtin_vec_vcmpne_p (vsll, vsll);
+    VCMPNED_P  VCMPNED_VSLL_P
+  signed int __builtin_vec_vcmpne_p (vull, vull);
+    VCMPNED_P  VCMPNED_VULL_P
+  signed int __builtin_vec_vcmpne_p (vbll, vbll);
+    VCMPNED_P  VCMPNED_VBLL_P
+  signed int __builtin_vec_vcmpne_p (vsq, vsq);
+    VCMPNET_P  VCMPNET_VSQ_P
+  signed int __builtin_vec_vcmpne_p (vuq, vuq);
+    VCMPNET_P  VCMPNET_VUQ_P
+  signed int __builtin_vec_vcmpne_p (vf, vf);
+    VCMPNEFP_P
+  signed int __builtin_vec_vcmpne_p (vd, vd);
+    VCMPNEDP_P
+; The following variants are deprecated.
+  signed int __builtin_vec_vcmpne_p (signed int, vbc, vuc);
+    VCMPNEB_P  VCMPNEB_P_BU
+  signed int __builtin_vec_vcmpne_p (signed int, vuc, vbc);
+    VCMPNEB_P  VCMPNEB_P_UB
+  signed int __builtin_vec_vcmpne_p (signed int, vbc, vsc);
+    VCMPNEB_P  VCMPNEB_P_BS
+  signed int __builtin_vec_vcmpne_p (signed int, vsc, vbc);
+    VCMPNEB_P  VCMPNEB_P_SB
+  signed int __builtin_vec_vcmpne_p (signed int, vbs, vus);
+    VCMPNEH_P  VCMPNEH_P_BU
+  signed int __builtin_vec_vcmpne_p (signed int, vus, vbs);
+    VCMPNEH_P  VCMPNEH_P_UB
+  signed int __builtin_vec_vcmpne_p (signed int, vbs, vss);
+    VCMPNEH_P  VCMPNEH_P_BS
+  signed int __builtin_vec_vcmpne_p (signed int, vss, vbs);
+    VCMPNEH_P  VCMPNEH_P_SB
+  signed int __builtin_vec_vcmpne_p (signed int, vbi, vui);
+    VCMPNEW_P  VCMPNEW_P_BU
+  signed int __builtin_vec_vcmpne_p (signed int, vui, vbi);
+    VCMPNEW_P  VCMPNEW_P_UB
+  signed int __builtin_vec_vcmpne_p (signed int, vbi, vsi);
+    VCMPNEW_P  VCMPNEW_P_BS
+  signed int __builtin_vec_vcmpne_p (signed int, vsi, vbi);
+    VCMPNEW_P  VCMPNEW_P_SB
+  signed int __builtin_vec_vcmpne_p (signed int, vbll, vull);
+    VCMPNED_P  VCMPNED_P_BU
+  signed int __builtin_vec_vcmpne_p (signed int, vull, vbll);
+    VCMPNED_P  VCMPNED_P_UB
+  signed int __builtin_vec_vcmpne_p (signed int, vbll, vsll);
+    VCMPNED_P  VCMPNED_P_BS
+  signed int __builtin_vec_vcmpne_p (signed int, vsll, vbll);
+    VCMPNED_P  VCMPNED_P_SB
+
+[VEC_CMPNEZ, vec_cmpnez, __builtin_vec_vcmpnez, _ARCH_PWR9]
+  vbc __builtin_vec_cmpnez (vsc, vsc);
+    CMPNEZB  CMPNEZB_S
+  vbc __builtin_vec_cmpnez (vuc, vuc);
+    CMPNEZB  CMPNEZB_U
+  vbs __builtin_vec_cmpnez (vss, vss);
+    CMPNEZH  CMPNEZH_S
+  vbs __builtin_vec_cmpnez (vus, vus);
+    CMPNEZH  CMPNEZH_U
+  vbi __builtin_vec_cmpnez (vsi, vsi);
+    CMPNEZW  CMPNEZW_S
+  vbi __builtin_vec_cmpnez (vui, vui);
+    CMPNEZW  CMPNEZW_U
+
+; We skip generating a #define because of the C-versus-C++ complexity
+; in altivec.h.  Look there for the template-y details.
+[VEC_CMPNEZ_P, SKIP, __builtin_vec_vcmpnez_p]
+  signed int __builtin_vec_vcmpnez_p (signed int, vsc, vsc);
+    VCMPNEZB_P  VCMPNEZB_VSC_P
+  signed int __builtin_vec_vcmpnez_p (signed int, vuc, vuc);
+    VCMPNEZB_P  VCMPNEZB_VUC_P
+  signed int __builtin_vec_vcmpnez_p (signed int, vss, vss);
+    VCMPNEZH_P  VCMPNEZH_VSS_P
+  signed int __builtin_vec_vcmpnez_p (signed int, vus, vus);
+    VCMPNEZH_P  VCMPNEZH_VUS_P
+  signed int __builtin_vec_vcmpnez_p (signed int, vsi, vsi);
+    VCMPNEZW_P  VCMPNEZW_VSI_P
+  signed int __builtin_vec_vcmpnez_p (signed int, vui, vui);
+    VCMPNEZW_P  VCMPNEZW_VUI_P
+
+[VEC_CMPRB, SKIP, __builtin_byte_in_range]
+  signed int __builtin_byte_in_range (unsigned int, unsigned int);
+    CMPRB
+
+[VEC_CMPRB2, SKIP, __builtin_byte_in_either_range]
+  signed int __builtin_byte_in_either_range (unsigned int, unsigned int);
+    CMPRB2
+
+[VEC_CNTLZ, vec_cntlz, __builtin_vec_vclz, _ARCH_PWR8]
+  vsc __builtin_vec_vclz (vsc);
+    VCLZB  VCLZB_S
+  vuc __builtin_vec_vclz (vuc);
+    VCLZB  VCLZB_U
+  vss __builtin_vec_vclz (vss);
+    VCLZH  VCLZH_S
+  vus __builtin_vec_vclz (vus);
+    VCLZH  VCLZH_U
+  vsi __builtin_vec_vclz (vsi);
+    VCLZW  VCLZW_S
+  vui __builtin_vec_vclz (vui);
+    VCLZW  VCLZW_U
+  vsll __builtin_vec_vclz (vsll);
+    VCLZD  VCLZD_S
+  vull __builtin_vec_vclz (vull);
+    VCLZD  VCLZD_U
+
+[VEC_CNTLZM, vec_cntlzm, __builtin_vec_vclzdm, _ARCH_PWR10]
+  vull __builtin_vec_vclzdm (vull, vull);
+    VCLZDM
+
+[VEC_CNTTZM, vec_cnttzm, __builtin_vec_vctzdm, _ARCH_PWR10]
+  vull __builtin_vec_vctzdm (vull, vull);
+    VCTZDM
+
+[VEC_CNTLZ_LSBB, vec_cntlz_lsbb, __builtin_vec_vclzlsbb, _ARCH_PWR9]
+  signed int __builtin_vec_vclzlsbb (vsc);
+    VCLZLSBB_V16QI  VCLZLSBB_VSC
+  signed int __builtin_vec_vclzlsbb (vuc);
+    VCLZLSBB_V16QI  VCLZLSBB_VUC
+  signed int __builtin_vec_vclzlsbb (vss);
+    VCLZLSBB_V8HI  VCLZLSBB_VSS
+  signed int __builtin_vec_vclzlsbb (vus);
+    VCLZLSBB_V8HI  VCLZLSBB_VUS
+  signed int __builtin_vec_vclzlsbb (vsi);
+    VCLZLSBB_V4SI  VCLZLSBB_VSI
+  signed int __builtin_vec_vclzlsbb (vui);
+    VCLZLSBB_V4SI  VCLZLSBB_VUI
+
+[VEC_CNTM, vec_cntm, __builtin_vec_cntm, _ARCH_PWR10]
+  unsigned long long __builtin_vec_cntm (vuc, const int);
+    VCNTMBB
+  unsigned long long __builtin_vec_cntm (vus, const int);
+    VCNTMBH
+  unsigned long long __builtin_vec_cntm (vui, const int);
+    VCNTMBW
+  unsigned long long __builtin_vec_cntm (vull, const int);
+    VCNTMBD
+
+[VEC_CNTTZ, vec_cnttz, __builtin_vec_vctz, _ARCH_PWR9]
+  vsc __builtin_vec_vctz (vsc);
+    VCTZB  VCTZB_S
+  vuc __builtin_vec_vctz (vuc);
+    VCTZB  VCTZB_U
+  vss __builtin_vec_vctz (vss);
+    VCTZH  VCTZH_S
+  vus __builtin_vec_vctz (vus);
+    VCTZH  VCTZH_U
+  vsi __builtin_vec_vctz (vsi);
+    VCTZW  VCTZW_S
+  vui __builtin_vec_vctz (vui);
+    VCTZW  VCTZW_U
+  vsll __builtin_vec_vctz (vsll);
+    VCTZD  VCTZD_S
+  vull __builtin_vec_vctz (vull);
+    VCTZD  VCTZD_U
+
+[VEC_CNTTZ_LSBB, vec_cnttz_lsbb, __builtin_vec_vctzlsbb, _ARCH_PWR9]
+  signed int __builtin_vec_vctzlsbb (vsc);
+    VCTZLSBB_V16QI  VCTZLSBB_VSC
+  signed int __builtin_vec_vctzlsbb (vuc);
+    VCTZLSBB_V16QI  VCTZLSBB_VUC
+  signed int __builtin_vec_vctzlsbb (vss);
+    VCTZLSBB_V8HI  VCTZLSBB_VSS
+  signed int __builtin_vec_vctzlsbb (vus);
+    VCTZLSBB_V8HI  VCTZLSBB_VUS
+  signed int __builtin_vec_vctzlsbb (vsi);
+    VCTZLSBB_V4SI  VCTZLSBB_VSI
+  signed int __builtin_vec_vctzlsbb (vui);
+    VCTZLSBB_V4SI  VCTZLSBB_VUI
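+
+; Illustration (not part of this table): each stanza maps one
+; overloaded name onto type-specific instances, so the same source-level
+; call resolves purely from the argument type (assuming -mcpu=power8 or
+; later for vec_cntlz):
+;
+;   vector unsigned char c;
+;   vector unsigned long long d;
+;   vector unsigned char lzc = vec_cntlz (c);        /* VCLZB */
+;   vector unsigned long long lzd = vec_cntlz (d);   /* VCLZD */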
+
+[VEC_CONVERT_4F32_8I16, SKIP, __builtin_vec_convert_4f32_8i16]
+  vus __builtin_vec_convert_4f32_8i16 (vf, vf);
+    CONVERT_4F32_8I16
+
+[VEC_CONVERT_4F32_8F16, vec_pack_to_short_fp32, __builtin_vec_convert_4f32_8f16, _ARCH_PWR9]
+  vus __builtin_vec_convert_4f32_8f16 (vf, vf);
+    CONVERT_4F32_8F16
+
+[VEC_COPYSIGN, vec_cpsgn, __builtin_vec_copysign]
+  vf __builtin_vec_copysign (vf, vf);
+    CPSGNSP
+  vd __builtin_vec_copysign (vd, vd);
+    CPSGNDP
+
+[VEC_CTF, vec_ctf, __builtin_vec_ctf]
+  vf __builtin_vec_ctf (vsi, const int);
+    VCFSX
+  vf __builtin_vec_ctf (vui, const int);
+    VCFUX
+  vd __builtin_vec_ctf (vsll, const int);
+    XVCVSXDDP_SCALE
+  vd __builtin_vec_ctf (vull, const int);
+    XVCVUXDDP_SCALE
+
+[VEC_CTS, vec_cts, __builtin_vec_cts]
+  vsi __builtin_vec_cts (vf, const int);
+    VCTSXS
+  vsll __builtin_vec_cts (vd, const int);
+    XVCVDPSXDS_SCALE
+
+[VEC_CTU, vec_ctu, __builtin_vec_ctu]
+  vui __builtin_vec_ctu (vf, const int);
+    VCTUXS
+  vull __builtin_vec_ctu (vd, const int);
+    XVCVDPUXDS_SCALE
+
+[VEC_DIV, vec_div, __builtin_vec_div, __VSX__]
+  vsi __builtin_vec_div (vsi, vsi);
+    VDIVSW
+  vui __builtin_vec_div (vui, vui);
+    VDIVUW
+  vsll __builtin_vec_div (vsll, vsll);
+    DIV_V2DI
+  vull __builtin_vec_div (vull, vull);
+    UDIV_V2DI
+  vsq __builtin_vec_div (vsq, vsq);
+    DIV_V1TI
+  vuq __builtin_vec_div (vuq, vuq);
+    UDIV_V1TI
+  vf __builtin_vec_div (vf, vf);
+    XVDIVSP
+  vd __builtin_vec_div (vd, vd);
+    XVDIVDP
+
+[VEC_DIVE, vec_dive, __builtin_vec_dive, _ARCH_PWR10]
+  vsi __builtin_vec_dive (vsi, vsi);
+    VDIVESW
+  vui __builtin_vec_dive (vui, vui);
+    VDIVEUW
+  vsll __builtin_vec_dive (vsll, vsll);
+    VDIVESD
+  vull __builtin_vec_dive (vull, vull);
+    VDIVEUD
+  vsq __builtin_vec_dive (vsq, vsq);
+    DIVES_V1TI
+  vuq __builtin_vec_dive (vuq, vuq);
+    DIVEU_V1TI
+
+[VEC_DOUBLE, vec_double, __builtin_vec_double]
+  vd __builtin_vec_double (vsll);
+    XVCVSXDDP
+  vd __builtin_vec_double (vull);
+    XVCVUXDDP
+
+[VEC_DOUBLEE, vec_doublee, __builtin_vec_doublee]
+  vd __builtin_vec_doublee (vsi);
+    DOUBLEE_V4SI
+  vd __builtin_vec_doublee (vui);
+    UNS_DOUBLEE_V4SI
+  vd __builtin_vec_doublee (vf);
+    DOUBLEE_V4SF
+
+[VEC_DOUBLEH, vec_doubleh, __builtin_vec_doubleh]
+  vd __builtin_vec_doubleh (vsi);
+    DOUBLEH_V4SI
+  vd __builtin_vec_doubleh (vui);
+    UNS_DOUBLEH_V4SI
+  vd __builtin_vec_doubleh (vf);
+    DOUBLEH_V4SF
+
+[VEC_DOUBLEL, vec_doublel, __builtin_vec_doublel]
+  vd __builtin_vec_doublel (vsi);
+    DOUBLEL_V4SI
+  vd __builtin_vec_doublel (vui);
+    UNS_DOUBLEL_V4SI
+  vd __builtin_vec_doublel (vf);
+    DOUBLEL_V4SF
+
+[VEC_DOUBLEO, vec_doubleo, __builtin_vec_doubleo]
+  vd __builtin_vec_doubleo (vsi);
+    DOUBLEO_V4SI
+  vd __builtin_vec_doubleo (vui);
+    UNS_DOUBLEO_V4SI
+  vd __builtin_vec_doubleo (vf);
+    DOUBLEO_V4SF
+
+[VEC_DST, vec_dst, __builtin_vec_dst]
+  void __builtin_vec_dst (unsigned char *, const int, const int);
+    DST  DST_UC
+  void __builtin_vec_dst (signed char *, const int, const int);
+    DST  DST_SC
+  void __builtin_vec_dst (unsigned short *, const int, const int);
+    DST  DST_US
+  void __builtin_vec_dst (signed short *, const int, const int);
+    DST  DST_SS
+  void __builtin_vec_dst (unsigned int *, const int, const int);
+    DST  DST_UI
+  void __builtin_vec_dst (signed int *, const int, const int);
+    DST  DST_SI
+  void __builtin_vec_dst (unsigned long *, const int, const int);
+    DST  DST_UL
+  void __builtin_vec_dst (signed long *, const int, const int);
+    DST  DST_SL
+  void __builtin_vec_dst (unsigned long long *, const int, const int);
+    DST  DST_ULL
+  void __builtin_vec_dst (signed long long *, const int, const int);
+    DST  DST_SLL
+  void __builtin_vec_dst (float *, const int, const int);
+    DST  DST_F
+  void __builtin_vec_dst (vuc *, const int, const int);
+    DST  DST_VUC
+  void __builtin_vec_dst (vsc *, const int, const int);
+    DST  DST_VSC
+  void __builtin_vec_dst (vbc *, const int, const int);
+    DST  DST_VBC
+  void __builtin_vec_dst (vus *, const int, const int);
+    DST  DST_VUS
+  void __builtin_vec_dst (vss *, const int, const int);
+    DST  DST_VSS
+  void __builtin_vec_dst (vbs *, const int, const int);
+    DST  DST_VBS
+  void __builtin_vec_dst (vp *, const int, const int);
+    DST  DST_VP
+  void __builtin_vec_dst (vui *, const int, const int);
+    DST  DST_VUI
+  void __builtin_vec_dst (vsi *, const int, const int);
+    DST  DST_VSI
+  void __builtin_vec_dst (vbi *, const int, const int);
+    DST  DST_VBI
+  void __builtin_vec_dst (vf *, const int, const int);
+    DST  DST_VF
+
+[VEC_DSTST, vec_dstst, __builtin_vec_dstst]
+  void __builtin_vec_dstst (unsigned char *, const int, const int);
+    DSTST  DSTST_UC
+  void __builtin_vec_dstst (signed char *, const int, const int);
+    DSTST  DSTST_SC
+  void __builtin_vec_dstst (unsigned short *, const int, const int);
+    DSTST  DSTST_US
+  void __builtin_vec_dstst (signed short *, const int, const int);
+    DSTST  DSTST_SS
+  void __builtin_vec_dstst (unsigned int *, const int, const int);
+    DSTST  DSTST_UI
+  void __builtin_vec_dstst (signed int *, const int, const int);
+    DSTST  DSTST_SI
+  void __builtin_vec_dstst (unsigned long *, const int, const int);
+    DSTST  DSTST_UL
+  void __builtin_vec_dstst (signed long *, const int, const int);
+    DSTST  DSTST_SL
+  void __builtin_vec_dstst (unsigned long long *, const int, const int);
+    DSTST  DSTST_ULL
+  void __builtin_vec_dstst (signed long long *, const int, const int);
+    DSTST  DSTST_SLL
+  void __builtin_vec_dstst (float *, const int, const int);
+    DSTST  DSTST_F
+  void __builtin_vec_dstst (vuc *, const int, const int);
+    DSTST  DSTST_VUC
+  void __builtin_vec_dstst (vsc *, const int, const int);
+    DSTST  DSTST_VSC
+  void __builtin_vec_dstst (vbc *, const int, const int);
+    DSTST  DSTST_VBC
+  void __builtin_vec_dstst (vus *, const int, const int);
+    DSTST  DSTST_VUS
+  void __builtin_vec_dstst (vss *, const int, const int);
+    DSTST  DSTST_VSS
+  void __builtin_vec_dstst (vbs *, const int, const int);
+    DSTST  DSTST_VBS
+  void __builtin_vec_dstst (vp *, const int, const int);
+    DSTST  DSTST_VP
+  void __builtin_vec_dstst (vui *, const int, const int);
+    DSTST  DSTST_VUI
+  void __builtin_vec_dstst (vsi *, const int, const int);
+    DSTST  DSTST_VSI
+  void __builtin_vec_dstst (vbi *, const int, const int);
+    DSTST  DSTST_VBI
+  void __builtin_vec_dstst (vf *, const int, const int);
+    DSTST  DSTST_VF
+
+[VEC_DSTSTT, vec_dststt, __builtin_vec_dststt]
+  void __builtin_vec_dststt (unsigned char *, const int, const int);
+    DSTSTT  DSTSTT_UC
+  void __builtin_vec_dststt (signed char *, const int, const int);
+    DSTSTT  DSTSTT_SC
+  void __builtin_vec_dststt (unsigned short *, const int, const int);
+    DSTSTT  DSTSTT_US
+  void __builtin_vec_dststt (signed short *, const int, const int);
+    DSTSTT  DSTSTT_SS
+  void __builtin_vec_dststt (unsigned int *, const int, const int);
+    DSTSTT  DSTSTT_UI
+  void __builtin_vec_dststt (signed int *, const int, const int);
+    DSTSTT  DSTSTT_SI
+  void __builtin_vec_dststt (unsigned long *, const int, const int);
+    DSTSTT  DSTSTT_UL
+  void __builtin_vec_dststt (signed long *, const int, const int);
+    DSTSTT  DSTSTT_SL
+  void __builtin_vec_dststt (unsigned long long *, const int, const int);
+    DSTSTT  DSTSTT_ULL
+  void __builtin_vec_dststt (signed long long *, const int, const int);
+    DSTSTT  DSTSTT_SLL
+  void __builtin_vec_dststt (float *, const int, const int);
+    DSTSTT  DSTSTT_F
+  void __builtin_vec_dststt (vuc *, const int, const int);
+    DSTSTT  DSTSTT_VUC
+  void __builtin_vec_dststt (vsc *, const int, const int);
+    DSTSTT  DSTSTT_VSC
+  void __builtin_vec_dststt (vbc *, const int, const int);
+    DSTSTT  DSTSTT_VBC
+  void __builtin_vec_dststt (vus *, const int, const int);
+    DSTSTT  DSTSTT_VUS
+  void __builtin_vec_dststt (vss *, const int, const int);
+    DSTSTT  DSTSTT_VSS
+  void __builtin_vec_dststt (vbs *, const int, const int);
+    DSTSTT  DSTSTT_VBS
+  void __builtin_vec_dststt (vp *, const int, const int);
+    DSTSTT  DSTSTT_VP
+  void __builtin_vec_dststt (vui *, const int, const int);
+    DSTSTT  DSTSTT_VUI
+  void __builtin_vec_dststt (vsi *, const int, const int);
+    DSTSTT  DSTSTT_VSI
+  void __builtin_vec_dststt (vbi *, const int, const int);
+    DSTSTT  DSTSTT_VBI
+  void __builtin_vec_dststt (vf *, const int, const int);
+    DSTSTT  DSTSTT_VF
+
+[VEC_DSTT, vec_dstt, __builtin_vec_dstt]
+  void __builtin_vec_dstt (unsigned char *, const int, const int);
+    DSTT  DSTT_UC
+  void __builtin_vec_dstt (signed char *, const int, const int);
+    DSTT  DSTT_SC
+  void __builtin_vec_dstt (unsigned short *, const int, const int);
+    DSTT  DSTT_US
+  void __builtin_vec_dstt (signed short *, const int, const int);
+    DSTT  DSTT_SS
+  void __builtin_vec_dstt (unsigned int *, const int, const int);
+    DSTT  DSTT_UI
+  void __builtin_vec_dstt (signed int *, const int, const int);
+    DSTT  DSTT_SI
+  void __builtin_vec_dstt (unsigned long *, const int, const int);
+    DSTT  DSTT_UL
+  void __builtin_vec_dstt (signed long *, const int, const int);
+    DSTT  DSTT_SL
+  void __builtin_vec_dstt (unsigned long long *, const int, const int);
+    DSTT  DSTT_ULL
+  void __builtin_vec_dstt (signed long long *, const int, const int);
+    DSTT  DSTT_SLL
+  void __builtin_vec_dstt (float *, const int, const int);
+    DSTT  DSTT_F
+  void __builtin_vec_dstt (vuc *, const int, const int);
+    DSTT  DSTT_VUC
+  void __builtin_vec_dstt (vsc *, const int, const int);
+    DSTT  DSTT_VSC
+  void __builtin_vec_dstt (vbc *, const int, const int);
+    DSTT  DSTT_VBC
+  void __builtin_vec_dstt (vus *, const int, const int);
+    DSTT  DSTT_VUS
+  void __builtin_vec_dstt (vss *, const int, const int);
+    DSTT  DSTT_VSS
+  void __builtin_vec_dstt (vbs *, const int, const int);
+    DSTT  DSTT_VBS
+  void __builtin_vec_dstt (vp *, const int, const int);
+    DSTT  DSTT_VP
+  void __builtin_vec_dstt (vui *, const int, const int);
+    DSTT  DSTT_VUI
+  void __builtin_vec_dstt (vsi *, const int, const int);
+    DSTT  DSTT_VSI
+  void __builtin_vec_dstt (vbi *, const int, const int);
+    DSTT  DSTT_VBI
+  void __builtin_vec_dstt (vf *, const int, const int);
+    DSTT  DSTT_VF
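+
+; Usage sketch (illustrative values, not part of this table): the dst*
+; builtins start a software-managed prefetch stream; the second operand
+; packs block size, block count, and stride, and the third names the
+; stream tag:
+;
+;   const float *p;   /* data about to be streamed through */
+;   vec_dst (p, (4 << 24) | (8 << 16) | 64, 0);   /* touch stream 0 */
+;   vec_dss (0);   /* stop stream 0 when finished */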
+
+[VEC_EQV, vec_eqv, __builtin_vec_eqv, _ARCH_PWR8]
+  vsc __builtin_vec_eqv (vsc, vsc);
+    EQV_V16QI
+  vuc __builtin_vec_eqv (vuc, vuc);
+    EQV_V16QI_UNS  EQV_V16QI_VUC
+  vbc __builtin_vec_eqv (vbc, vbc);
+    EQV_V16QI_UNS  EQV_V16QI_VBC
+  vss __builtin_vec_eqv (vss, vss);
+    EQV_V8HI
+  vus __builtin_vec_eqv (vus, vus);
+    EQV_V8HI_UNS  EQV_V8HI_VUS
+  vbs __builtin_vec_eqv (vbs, vbs);
+    EQV_V8HI_UNS  EQV_V8HI_VBS
+  vsi __builtin_vec_eqv (vsi, vsi);
+    EQV_V4SI
+  vui __builtin_vec_eqv (vui, vui);
+    EQV_V4SI_UNS  EQV_V4SI_VUI
+  vbi __builtin_vec_eqv (vbi, vbi);
+    EQV_V4SI_UNS  EQV_V4SI_VBI
+  vsll __builtin_vec_eqv (vsll, vsll);
+    EQV_V2DI
+  vull __builtin_vec_eqv (vull, vull);
+    EQV_V2DI_UNS  EQV_V2DI_VULL
+  vbll __builtin_vec_eqv (vbll, vbll);
+    EQV_V2DI_UNS  EQV_V2DI_VBLL
+  vf __builtin_vec_eqv (vf, vf);
+    EQV_V4SF
+  vd __builtin_vec_eqv (vd, vd);
+    EQV_V2DF
+; The following variants are deprecated.
+  vsc __builtin_vec_eqv (vbc, vsc);
+    EQV_V16QI  EQV_VBC_VSC
+  vsc __builtin_vec_eqv (vsc, vbc);
+    EQV_V16QI  EQV_VSC_VBC
+  vuc __builtin_vec_eqv (vbc, vuc);
+    EQV_V16QI_UNS  EQV_VBC_VUC
+  vuc __builtin_vec_eqv (vuc, vbc);
+    EQV_V16QI_UNS  EQV_VUC_VBC
+  vss __builtin_vec_eqv (vbs, vss);
+    EQV_V8HI  EQV_VBS_VSS
+  vss __builtin_vec_eqv (vss, vbs);
+    EQV_V8HI  EQV_VSS_VBS
+  vus __builtin_vec_eqv (vbs, vus);
+    EQV_V8HI_UNS  EQV_VBS_VUS
+  vus __builtin_vec_eqv (vus, vbs);
+    EQV_V8HI_UNS  EQV_VUS_VBS
+  vsi __builtin_vec_eqv (vbi, vsi);
+    EQV_V4SI  EQV_VBI_VSI
+  vsi __builtin_vec_eqv (vsi, vbi);
+    EQV_V4SI  EQV_VSI_VBI
+  vui __builtin_vec_eqv (vbi, vui);
+    EQV_V4SI_UNS  EQV_VBI_VUI
+  vui __builtin_vec_eqv (vui, vbi);
+    EQV_V4SI_UNS  EQV_VUI_VBI
+  vsll __builtin_vec_eqv (vbll, vsll);
+    EQV_V2DI  EQV_VBLL_VSLL
+  vsll __builtin_vec_eqv (vsll, vbll);
+    EQV_V2DI  EQV_VSLL_VBLL
+  vull __builtin_vec_eqv (vbll, vull);
+    EQV_V2DI_UNS  EQV_VBLL_VULL
+  vull __builtin_vec_eqv (vull, vbll);
+    EQV_V2DI_UNS  EQV_VULL_VBLL
+
+[VEC_EXPANDM, vec_expandm, __builtin_vec_vexpandm, _ARCH_PWR10]
+  vuc __builtin_vec_vexpandm (vuc);
+    VEXPANDMB
+  vus __builtin_vec_vexpandm (vus);
+    VEXPANDMH
+  vui __builtin_vec_vexpandm (vui);
+    VEXPANDMW
+  vull __builtin_vec_vexpandm (vull);
+    VEXPANDMD
+  vuq __builtin_vec_vexpandm (vuq);
+    VEXPANDMQ
+
+[VEC_EXPTE, vec_expte, __builtin_vec_expte]
+  vf __builtin_vec_expte (vf);
+    VEXPTEFP
+
+; There are no actual builtins for vec_extract.  There is special handling for
+; this in altivec_resolve_overloaded_builtin in rs6000-c.c, where the call
+; is replaced by "pointer tricks."  The single overload here causes
+; __builtin_vec_extract to be registered with the front end so this can
+; happen.  A usage sketch follows the stanza.
+[VEC_EXTRACT, vec_extract, __builtin_vec_extract]
+  vsi __builtin_vec_extract (vsi, signed int);
+    VSPLTW  EXTRACT_FAKERY
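+
+; Illustration (not part of this table): because of the special handling
+; noted above, vec_extract accepts any vector type and an index even
+; though only the placeholder prototype is registered here:
+;
+;   vector signed int v = {10, 20, 30, 40};
+;   int e = vec_extract (v, 1);   /* 20, lowered via the "pointer tricks" */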
+
+[VEC_EXTRACT_FP_FROM_SHORTH, vec_extract_fp32_from_shorth, __builtin_vec_vextract_fp_from_shorth, _ARCH_PWR9]
+  vf __builtin_vec_vextract_fp_from_shorth (vus);
+    VEXTRACT_FP_FROM_SHORTH
+
+[VEC_EXTRACT_FP_FROM_SHORTL, vec_extract_fp32_from_shortl, __builtin_vec_vextract_fp_from_shortl, _ARCH_PWR9]
+  vf __builtin_vec_vextract_fp_from_shortl (vus);
+    VEXTRACT_FP_FROM_SHORTL
+
+[VEC_EXTRACTH, vec_extracth, __builtin_vec_extracth, _ARCH_PWR10]
+  vull __builtin_vec_extracth (vuc, vuc, unsigned char);
+    VEXTRACTBR
+  vull __builtin_vec_extracth (vus, vus, unsigned char);
+    VEXTRACTHR
+  vull __builtin_vec_extracth (vui, vui, unsigned char);
+    VEXTRACTWR
+  vull __builtin_vec_extracth (vull, vull, unsigned char);
+    VEXTRACTDR
+
+[VEC_EXTRACTL, vec_extractl, __builtin_vec_extractl, _ARCH_PWR10]
+  vull __builtin_vec_extractl (vuc, vuc, unsigned char);
+    VEXTRACTBL
+  vull __builtin_vec_extractl (vus, vus, unsigned char);
+    VEXTRACTHL
+  vull __builtin_vec_extractl (vui, vui, unsigned char);
+    VEXTRACTWL
+  vull __builtin_vec_extractl (vull, vull, unsigned char);
+    VEXTRACTDL
+
+[VEC_EXTRACTM, vec_extractm, __builtin_vec_vextractm, _ARCH_PWR10]
+  signed int __builtin_vec_vextractm (vuc);
+    VEXTRACTMB
+  signed int __builtin_vec_vextractm (vus);
+    VEXTRACTMH
+  signed int __builtin_vec_vextractm (vui);
+    VEXTRACTMW
+  signed int __builtin_vec_vextractm (vull);
+    VEXTRACTMD
+  signed int __builtin_vec_vextractm (vuq);
+    VEXTRACTMQ
+
+[VEC_EXTRACT4B, vec_extract4b, __builtin_vec_extract4b, _ARCH_PWR9]
+  vull __builtin_vec_extract4b (vuc, const int);
+    EXTRACT4B
+
+[VEC_EXTULX, vec_xlx, __builtin_vec_vextulx, _ARCH_PWR9]
+  signed char __builtin_vec_vextulx (unsigned int, vsc);
+    VEXTUBLX  VEXTUBLX_S
+  unsigned char __builtin_vec_vextulx (unsigned int, vuc);
+    VEXTUBLX  VEXTUBLX_U
+  signed short __builtin_vec_vextulx (unsigned int, vss);
+    VEXTUHLX  VEXTUHLX_S
+  unsigned short __builtin_vec_vextulx (unsigned int, vus);
+    VEXTUHLX  VEXTUHLX_U
+  signed int __builtin_vec_vextulx (unsigned int, vsi);
+    VEXTUWLX  VEXTUWLX_S
+  unsigned int __builtin_vec_vextulx (unsigned int, vui);
+    VEXTUWLX  VEXTUWLX_U
+  float __builtin_vec_vextulx (unsigned int, vf);
+    VEXTUWLX  VEXTUWLX_F
+
+[VEC_EXTURX, vec_xrx, __builtin_vec_vexturx, _ARCH_PWR9]
+  signed char __builtin_vec_vexturx (unsigned int, vsc);
+    VEXTUBRX  VEXTUBRX_S
+  unsigned char __builtin_vec_vexturx (unsigned int, vuc);
+    VEXTUBRX  VEXTUBRX_U
+  signed short __builtin_vec_vexturx (unsigned int, vss);
+    VEXTUHRX  VEXTUHRX_S
+  unsigned short __builtin_vec_vexturx (unsigned int, vus);
+    VEXTUHRX  VEXTUHRX_U
+  signed int __builtin_vec_vexturx (unsigned int, vsi);
+    VEXTUWRX  VEXTUWRX_S
+  unsigned int __builtin_vec_vexturx (unsigned int, vui);
+    VEXTUWRX  VEXTUWRX_U
+  float __builtin_vec_vexturx (unsigned int, vf);
+    VEXTUWRX  VEXTUWRX_F
+
+[VEC_FIRSTMATCHINDEX, vec_first_match_index, __builtin_vec_first_match_index, _ARCH_PWR9]
+  unsigned int __builtin_vec_first_match_index (vsc, vsc);
+    VFIRSTMATCHINDEX_V16QI  FIRSTMATCHINDEX_VSC
+  unsigned int __builtin_vec_first_match_index (vuc, vuc);
+    VFIRSTMATCHINDEX_V16QI  FIRSTMATCHINDEX_VUC
+  unsigned int __builtin_vec_first_match_index (vss, vss);
+    VFIRSTMATCHINDEX_V8HI  FIRSTMATCHINDEX_VSS
+  unsigned int __builtin_vec_first_match_index (vus, vus);
+    VFIRSTMATCHINDEX_V8HI  FIRSTMATCHINDEX_VUS
+  unsigned int __builtin_vec_first_match_index (vsi, vsi);
+    VFIRSTMATCHINDEX_V4SI  FIRSTMATCHINDEX_VSI
+  unsigned int __builtin_vec_first_match_index (vui, vui);
+    VFIRSTMATCHINDEX_V4SI  FIRSTMATCHINDEX_VUI
+
+[VEC_FIRSTMATCHOREOSINDEX, vec_first_match_or_eos_index, __builtin_vec_first_match_or_eos_index, _ARCH_PWR9]
+  unsigned int __builtin_vec_first_match_or_eos_index (vsc, vsc);
+    VFIRSTMATCHOREOSINDEX_V16QI  FIRSTMATCHOREOSINDEX_VSC
+  unsigned int __builtin_vec_first_match_or_eos_index (vuc, vuc);
+    VFIRSTMATCHOREOSINDEX_V16QI  FIRSTMATCHOREOSINDEX_VUC
+  unsigned int __builtin_vec_first_match_or_eos_index (vss, vss);
+    VFIRSTMATCHOREOSINDEX_V8HI  FIRSTMATCHOREOSINDEX_VSS
+  unsigned int __builtin_vec_first_match_or_eos_index (vus, vus);
+    VFIRSTMATCHOREOSINDEX_V8HI  FIRSTMATCHOREOSINDEX_VUS
+  unsigned int __builtin_vec_first_match_or_eos_index (vsi, vsi);
+    VFIRSTMATCHOREOSINDEX_V4SI  FIRSTMATCHOREOSINDEX_VSI
+  unsigned int __builtin_vec_first_match_or_eos_index (vui, vui);
+    VFIRSTMATCHOREOSINDEX_V4SI  FIRSTMATCHOREOSINDEX_VUI
+
+[VEC_FIRSTMISMATCHINDEX, vec_first_mismatch_index, __builtin_vec_first_mismatch_index, _ARCH_PWR9]
+  unsigned int __builtin_vec_first_mismatch_index (vsc, vsc);
+    VFIRSTMISMATCHINDEX_V16QI  FIRSTMISMATCHINDEX_VSC
+  unsigned int __builtin_vec_first_mismatch_index (vuc, vuc);
+    VFIRSTMISMATCHINDEX_V16QI  FIRSTMISMATCHINDEX_VUC
+  unsigned int __builtin_vec_first_mismatch_index (vss, vss);
+    VFIRSTMISMATCHINDEX_V8HI  FIRSTMISMATCHINDEX_VSS
+  unsigned int __builtin_vec_first_mismatch_index (vus, vus);
+    VFIRSTMISMATCHINDEX_V8HI  FIRSTMISMATCHINDEX_VUS
+  unsigned int __builtin_vec_first_mismatch_index (vsi, vsi);
+    VFIRSTMISMATCHINDEX_V4SI  FIRSTMISMATCHINDEX_VSI
+  unsigned int __builtin_vec_first_mismatch_index (vui, vui);
+    VFIRSTMISMATCHINDEX_V4SI  FIRSTMISMATCHINDEX_VUI
+
+[VEC_FIRSTMISMATCHOREOSINDEX, vec_first_mismatch_or_eos_index, __builtin_vec_first_mismatch_or_eos_index, _ARCH_PWR9]
+  unsigned int __builtin_vec_first_mismatch_or_eos_index (vsc, vsc);
+    VFIRSTMISMATCHOREOSINDEX_V16QI  FIRSTMISMATCHOREOSINDEX_VSC
+  unsigned int __builtin_vec_first_mismatch_or_eos_index (vuc, vuc);
+    VFIRSTMISMATCHOREOSINDEX_V16QI  FIRSTMISMATCHOREOSINDEX_VUC
+  unsigned int __builtin_vec_first_mismatch_or_eos_index (vss, vss);
+    VFIRSTMISMATCHOREOSINDEX_V8HI  FIRSTMISMATCHOREOSINDEX_VSS
+  unsigned int __builtin_vec_first_mismatch_or_eos_index (vus, vus);
+    VFIRSTMISMATCHOREOSINDEX_V8HI  FIRSTMISMATCHOREOSINDEX_VUS
+  unsigned int __builtin_vec_first_mismatch_or_eos_index (vsi, vsi);
+    VFIRSTMISMATCHOREOSINDEX_V4SI  FIRSTMISMATCHOREOSINDEX_VSI
+  unsigned int __builtin_vec_first_mismatch_or_eos_index (vui, vui);
+    VFIRSTMISMATCHOREOSINDEX_V4SI  FIRSTMISMATCHOREOSINDEX_VUI
+
+[VEC_FLOAT, vec_float, __builtin_vec_float]
+  vf __builtin_vec_float (vsi);
+    XVCVSXWSP
+  vf __builtin_vec_float (vui);
+    XVCVUXWSP
+
+[VEC_FLOAT2, vec_float2, __builtin_vec_float2]
+  vf __builtin_vec_float2 (vsll, vsll);
+    FLOAT2_V2DI
+  vf __builtin_vec_float2 (vull, vull);
+    UNS_FLOAT2_V2DI
+  vf __builtin_vec_float2 (vd, vd);
+    FLOAT2_V2DF
+
+[VEC_FLOATE, vec_floate, __builtin_vec_floate]
+  vf __builtin_vec_floate (vsll);
+    FLOATE_V2DI
+  vf __builtin_vec_floate (vull);
+    UNS_FLOATE_V2DI
+  vf __builtin_vec_floate (vd);
+    FLOATE_V2DF
+
+[VEC_FLOATO, vec_floato, __builtin_vec_floato]
+  vf __builtin_vec_floato (vsll);
+    FLOATO_V2DI
+  vf __builtin_vec_floato (vull);
+    UNS_FLOATO_V2DI
+  vf __builtin_vec_floato (vd);
+    FLOATO_V2DF
+
+[VEC_FLOOR, vec_floor, __builtin_vec_floor]
+  vf __builtin_vec_floor (vf);
+    VRFIM
+  vd __builtin_vec_floor (vd);
+    XVRDPIM
+
+[VEC_GB, vec_gb, __builtin_vec_vgbbd, _ARCH_PWR8]
+  vsc __builtin_vec_vgbbd (vsc);
+    VGBBD  VGBBD_S
+  vuc __builtin_vec_vgbbd (vuc);
+    VGBBD  VGBBD_U
+
+[VEC_GENBM, vec_genbm, __builtin_vec_mtvsrbm, _ARCH_PWR10]
+  vuc __builtin_vec_mtvsrbm (unsigned long long);
+    MTVSRBM
+
+[VEC_GENHM, vec_genhm, __builtin_vec_mtvsrhm, _ARCH_PWR10]
+  vus __builtin_vec_mtvsrhm (unsigned long long);
+    MTVSRHM
+
+[VEC_GENWM, vec_genwm, __builtin_vec_mtvsrwm, _ARCH_PWR10]
+  vui __builtin_vec_mtvsrwm (unsigned long long);
+    MTVSRWM
+
+[VEC_GENDM, vec_gendm, __builtin_vec_mtvsrdm, _ARCH_PWR10]
+  vull __builtin_vec_mtvsrdm (unsigned long long);
+    MTVSRDM
+
+[VEC_GENQM, vec_genqm, __builtin_vec_mtvsrqm, _ARCH_PWR10]
+  vuq __builtin_vec_mtvsrqm (unsigned long long);
+    MTVSRQM
+
+[VEC_GENPCVM, vec_genpcvm, __builtin_vec_xxgenpcvm, _ARCH_PWR10]
+  vuc __builtin_vec_xxgenpcvm (vuc, const int);
+    XXGENPCVM_V16QI
+  vus __builtin_vec_xxgenpcvm (vus, const int);
+    XXGENPCVM_V8HI
+  vui __builtin_vec_xxgenpcvm (vui, const int);
+    XXGENPCVM_V4SI
+  vull __builtin_vec_xxgenpcvm (vull, const int);
+    XXGENPCVM_V2DI
+
+[VEC_GNB, vec_gnb, __builtin_vec_gnb, _ARCH_PWR10]
+  unsigned long long __builtin_vec_gnb (vuq, const int);
+    VGNB
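+
+; Illustration (Power10 only; values assumed): the vec_gen*m builtins
+; above expand the low-order bits of a GPR mask into a vector mask, one
+; bit per element:
+;
+;   unsigned long long m = 0xaaaaULL;
+;   vector unsigned char bytes = vec_genbm (m);         /* MTVSRBM */
+;   vector unsigned long long dwords = vec_gendm (m & 3);   /* MTVSRDM */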
+
+; There are no actual builtins for vec_insert.  There is special handling for
+; this in altivec_resolve_overloaded_builtin in rs6000-c.c, where the call
+; is replaced by "pointer tricks."  The single overload here causes
+; __builtin_vec_insert to be registered with the front end so this can happen.
+[VEC_INSERT, vec_insert, __builtin_vec_insert]
+  vsi __builtin_vec_insert (vsi, vsi, signed int);
+    XXPERMDI_4SI  INSERT_FAKERY
+
+[VEC_INSERTH, vec_inserth, __builtin_vec_inserth, _ARCH_PWR10]
+  vuc __builtin_vec_inserth (unsigned char, vuc, unsigned int);
+    VINSERTGPRBR
+  vuc __builtin_vec_inserth (vuc, vuc, unsigned int);
+    VINSERTVPRBR
+  vus __builtin_vec_inserth (unsigned short, vus, unsigned int);
+    VINSERTGPRHR
+  vus __builtin_vec_inserth (vus, vus, unsigned int);
+    VINSERTVPRHR
+  vui __builtin_vec_inserth (unsigned int, vui, unsigned int);
+    VINSERTGPRWR
+  vui __builtin_vec_inserth (vui, vui, unsigned int);
+    VINSERTVPRWR
+  vull __builtin_vec_inserth (unsigned long long, vull, unsigned int);
+    VINSERTGPRDR
+
+[VEC_INSERTL, vec_insertl, __builtin_vec_insertl, _ARCH_PWR10]
+  vuc __builtin_vec_insertl (unsigned char, vuc, unsigned int);
+    VINSERTGPRBL
+  vuc __builtin_vec_insertl (vuc, vuc, unsigned int);
+    VINSERTVPRBL
+  vus __builtin_vec_insertl (unsigned short, vus, unsigned int);
+    VINSERTGPRHL
+  vus __builtin_vec_insertl (vus, vus, unsigned int);
+    VINSERTVPRHL
+  vui __builtin_vec_insertl (unsigned int, vui, unsigned int);
+    VINSERTGPRWL
+  vui __builtin_vec_insertl (vui, vui, unsigned int);
+    VINSERTVPRWL
+  vull __builtin_vec_insertl (unsigned long long, vull, unsigned int);
+    VINSERTGPRDL
+
+[VEC_INSERT4B, vec_insert4b, __builtin_vec_insert4b, _ARCH_PWR9]
+  vuc __builtin_vec_insert4b (vsi, vuc, const int);
+    INSERT4B  INSERT4B_S
+  vuc __builtin_vec_insert4b (vui, vuc, const int);
+    INSERT4B  INSERT4B_U
+
+[VEC_LD, vec_ld, __builtin_vec_ld]
+  vsc __builtin_vec_ld (signed long, const vsc *);
+    LVX_V16QI  LVX_V16QI_VSC
+  vsc __builtin_vec_ld (signed long, const signed char *);
+    LVX_V16QI  LVX_V16QI_SC
+  vuc __builtin_vec_ld (signed long, const vuc *);
+    LVX_V16QI  LVX_V16QI_VUC
+  vuc __builtin_vec_ld (signed long, const unsigned char *);
+    LVX_V16QI  LVX_V16QI_UC
+  vbc __builtin_vec_ld (signed long, const vbc *);
+    LVX_V16QI  LVX_V16QI_VBC
+  vss __builtin_vec_ld (signed long, const vss *);
+    LVX_V8HI  LVX_V8HI_VSS
+  vss __builtin_vec_ld (signed long, const signed short *);
+    LVX_V8HI  LVX_V8HI_SS
+  vus __builtin_vec_ld (signed long, const vus *);
+    LVX_V8HI  LVX_V8HI_VUS
+  vus __builtin_vec_ld (signed long, const unsigned short *);
+    LVX_V8HI  LVX_V8HI_US
+  vbs __builtin_vec_ld (signed long, const vbs *);
+    LVX_V8HI  LVX_V8HI_VBS
+  vp __builtin_vec_ld (signed long, const vp *);
+    LVX_V8HI  LVX_V8HI_VP
+  vsi __builtin_vec_ld (signed long, const vsi *);
+    LVX_V4SI  LVX_V4SI_VSI
+  vsi __builtin_vec_ld (signed long, const signed int *);
+    LVX_V4SI  LVX_V4SI_SI
+  vui __builtin_vec_ld (signed long, const vui *);
+    LVX_V4SI  LVX_V4SI_VUI
+  vui __builtin_vec_ld (signed long, const unsigned int *);
+    LVX_V4SI  LVX_V4SI_UI
+  vbi __builtin_vec_ld (signed long, const vbi *);
+    LVX_V4SI  LVX_V4SI_VBI
+  vsll __builtin_vec_ld (signed long, const vsll *);
+    LVX_V2DI  LVX_V2DI_VSLL
+  vsll __builtin_vec_ld (signed long, const signed long long *);
+    LVX_V2DI  LVX_V2DI_SLL
+  vull __builtin_vec_ld (signed long, const vull *);
+    LVX_V2DI  LVX_V2DI_VULL
+  vull __builtin_vec_ld (signed long, const unsigned long long *);
+    LVX_V2DI  LVX_V2DI_ULL
+  vbll __builtin_vec_ld (signed long, const vbll *);
+    LVX_V2DI  LVX_V2DI_VBLL
+  vsq __builtin_vec_ld (signed long, const vsq *);
+    LVX_V1TI  LVX_V1TI_VSQ
+  vuq __builtin_vec_ld (signed long, const vuq *);
+    LVX_V1TI  LVX_V1TI_VUQ
+  vsq __builtin_vec_ld (signed long, const __int128 *);
+    LVX_V1TI  LVX_V1TI_TI
+  vuq __builtin_vec_ld (signed long, const unsigned __int128 *);
+    LVX_V1TI  LVX_V1TI_UTI
+  vf __builtin_vec_ld (signed long, const vf *);
+    LVX_V4SF  LVX_V4SF_VF
+  vf __builtin_vec_ld (signed long, const float *);
+    LVX_V4SF  LVX_V4SF_F
+  vd __builtin_vec_ld (signed long, const vd *);
+    LVX_V2DF  LVX_V2DF_VD
+  vd __builtin_vec_ld (signed long, const double *);
+    LVX_V2DF  LVX_V2DF_D
+; The following variants are deprecated.
+  vsi __builtin_vec_ld (signed long, const long *);
+    LVX_V4SI  LVX_V4SI_SL
+  vui __builtin_vec_ld (signed long, const unsigned long *);
+    LVX_V4SI  LVX_V4SI_UL
+
+[VEC_LDE, vec_lde, __builtin_vec_lde]
+  vsc __builtin_vec_lde (signed long, const signed char *);
+    LVEBX  LVEBX_SC
+  vuc __builtin_vec_lde (signed long, const unsigned char *);
+    LVEBX  LVEBX_UC
+  vss __builtin_vec_lde (signed long, const signed short *);
+    LVEHX  LVEHX_SS
+  vus __builtin_vec_lde (signed long, const unsigned short *);
+    LVEHX  LVEHX_US
+  vsi __builtin_vec_lde (signed long, const signed int *);
+    LVEWX  LVEWX_SI
+  vui __builtin_vec_lde (signed long, const unsigned int *);
+    LVEWX  LVEWX_UI
+  vf __builtin_vec_lde (signed long, const float *);
+    LVEWX  LVEWX_F
+; The following variants are deprecated.
+  vsi __builtin_vec_lde (signed long, const long *);
+    LVEWX  LVEWX_SL
+  vui __builtin_vec_lde (signed long, const unsigned long *);
+    LVEWX  LVEWX_UL
+
+[VEC_LDL, vec_ldl, __builtin_vec_ldl]
+  vsc __builtin_vec_ldl (signed long, const vsc *);
+    LVXL_V16QI  LVXL_V16QI_VSC
+  vsc __builtin_vec_ldl (signed long, const signed char *);
+    LVXL_V16QI  LVXL_V16QI_SC
+  vuc __builtin_vec_ldl (signed long, const vuc *);
+    LVXL_V16QI  LVXL_V16QI_VUC
+  vuc __builtin_vec_ldl (signed long, const unsigned char *);
+    LVXL_V16QI  LVXL_V16QI_UC
+  vbc __builtin_vec_ldl (signed long, const vbc *);
+    LVXL_V16QI  LVXL_V16QI_VBC
+  vss __builtin_vec_ldl (signed long, const vss *);
+    LVXL_V8HI  LVXL_V8HI_VSS
+  vss __builtin_vec_ldl (signed long, const signed short *);
+    LVXL_V8HI  LVXL_V8HI_SS
+  vus __builtin_vec_ldl (signed long, const vus *);
+    LVXL_V8HI  LVXL_V8HI_VUS
+  vus __builtin_vec_ldl (signed long, const unsigned short *);
+    LVXL_V8HI  LVXL_V8HI_US
+  vbs __builtin_vec_ldl (signed long, const vbs *);
+    LVXL_V8HI  LVXL_V8HI_VBS
+  vp __builtin_vec_ldl (signed long, const vp *);
+    LVXL_V8HI  LVXL_V8HI_VP
+  vsi __builtin_vec_ldl (signed long, const vsi *);
+    LVXL_V4SI  LVXL_V4SI_VSI
+  vsi __builtin_vec_ldl (signed long, const signed int *);
+    LVXL_V4SI  LVXL_V4SI_SI
+  vui __builtin_vec_ldl (signed long, const vui *);
+    LVXL_V4SI  LVXL_V4SI_VUI
+  vui __builtin_vec_ldl (signed long, const unsigned int *);
+    LVXL_V4SI  LVXL_V4SI_UI
+  vbi __builtin_vec_ldl (signed long, const vbi *);
+    LVXL_V4SI  LVXL_V4SI_VBI
+  vsll __builtin_vec_ldl (signed long, const vsll *);
+    LVXL_V2DI  LVXL_V2DI_VSLL
+  vsll __builtin_vec_ldl (signed long, const signed long long *);
+    LVXL_V2DI  LVXL_V2DI_SLL
+  vull __builtin_vec_ldl (signed long, const vull *);
+    LVXL_V2DI  LVXL_V2DI_VULL
+  vull __builtin_vec_ldl (signed long, const unsigned long long *);
+    LVXL_V2DI  LVXL_V2DI_ULL
+  vbll __builtin_vec_ldl (signed long, const vbll *);
+    LVXL_V2DI  LVXL_V2DI_VBLL
+  vf __builtin_vec_ldl (signed long, const vf *);
+    LVXL_V4SF  LVXL_V4SF_VF
+  vf __builtin_vec_ldl (signed long, const float *);
+    LVXL_V4SF  LVXL_V4SF_F
+  vd __builtin_vec_ldl (signed long, const vd *);
+    LVXL_V2DF  LVXL_V2DF_VD
+  vd __builtin_vec_ldl (signed long, const double *);
+    LVXL_V2DF  LVXL_V2DF_D
+
+[VEC_LOGE, vec_loge, __builtin_vec_loge]
+  vf __builtin_vec_loge (vf);
+    VLOGEFP
+
+[VEC_LVLX, vec_lvlx, __builtin_vec_lvlx, __PPU__]
+  vbc __builtin_vec_lvlx (signed long, const vbc *);
+    LVLX  LVLX_VBC
+  vsc __builtin_vec_lvlx (signed long, const vsc *);
+    LVLX  LVLX_VSC
+  vsc __builtin_vec_lvlx (signed long, const signed char *);
+    LVLX  LVLX_SC
+  vuc __builtin_vec_lvlx (signed long, const vuc *);
+    LVLX  LVLX_VUC
+  vuc __builtin_vec_lvlx (signed long, const unsigned char *);
+    LVLX  LVLX_UC
+  vbs __builtin_vec_lvlx (signed long, const vbs *);
+    LVLX  LVLX_VBS
+  vss __builtin_vec_lvlx (signed long, const vss *);
+    LVLX  LVLX_VSS
+  vss __builtin_vec_lvlx (signed long, const signed short *);
+    LVLX  LVLX_SS
+  vus __builtin_vec_lvlx (signed long, const vus *);
+    LVLX  LVLX_VUS
+  vus __builtin_vec_lvlx (signed long, const unsigned short *);
+    LVLX  LVLX_US
+  vp __builtin_vec_lvlx (signed long, const vp *);
+    LVLX  LVLX_VP
+  vbi __builtin_vec_lvlx (signed long, const vbi *);
+    LVLX  LVLX_VBI
+  vsi __builtin_vec_lvlx (signed long, const vsi *);
+    LVLX  LVLX_VSI
+  vsi __builtin_vec_lvlx (signed long, const signed int *);
+    LVLX  LVLX_SI
+  vui __builtin_vec_lvlx (signed long, const vui *);
+    LVLX  LVLX_VUI
+  vui __builtin_vec_lvlx (signed long, const unsigned int *);
+    LVLX  LVLX_UI
+  vf __builtin_vec_lvlx (signed long, const vf *);
+    LVLX  LVLX_VF
+  vf __builtin_vec_lvlx (signed long, const float *);
+    LVLX  LVLX_F
+
+[VEC_LVLXL, vec_lvlxl, __builtin_vec_lvlxl, __PPU__]
+  vbc __builtin_vec_lvlxl (signed long, const vbc *);
+    LVLXL  LVLXL_VBC
+  vsc __builtin_vec_lvlxl (signed long, const vsc *);
+    LVLXL  LVLXL_VSC
+  vsc __builtin_vec_lvlxl (signed long, const signed char *);
+    LVLXL  LVLXL_SC
+  vuc __builtin_vec_lvlxl (signed long, const vuc *);
+    LVLXL  LVLXL_VUC
+  vuc __builtin_vec_lvlxl (signed long, const unsigned char *);
+    LVLXL  LVLXL_UC
+  vbs __builtin_vec_lvlxl (signed long, const vbs *);
+    LVLXL  LVLXL_VBS
+  vss __builtin_vec_lvlxl (signed long, const vss *);
+    LVLXL  LVLXL_VSS
+  vss __builtin_vec_lvlxl (signed long, const signed short *);
+    LVLXL  LVLXL_SS
+  vus __builtin_vec_lvlxl (signed long, const vus *);
+    LVLXL  LVLXL_VUS
+  vus __builtin_vec_lvlxl (signed long, const unsigned short *);
+    LVLXL  LVLXL_US
+  vp __builtin_vec_lvlxl (signed long, const vp *);
+    LVLXL  LVLXL_VP
+  vbi __builtin_vec_lvlxl (signed long, const vbi *);
+    LVLXL  LVLXL_VBI
+  vsi __builtin_vec_lvlxl (signed long, const vsi *);
+    LVLXL  LVLXL_VSI
+  vsi __builtin_vec_lvlxl (signed long, const signed int *);
+    LVLXL  LVLXL_SI
+  vui __builtin_vec_lvlxl (signed long, const vui *);
+    LVLXL  LVLXL_VUI
+  vui __builtin_vec_lvlxl (signed long, const unsigned int *);
+    LVLXL  LVLXL_UI
+  vf __builtin_vec_lvlxl (signed long, const vf *);
+    LVLXL  LVLXL_VF
+  vf __builtin_vec_lvlxl (signed long, const float *);
+    LVLXL  LVLXL_F
+
+[VEC_LVRX, vec_lvrx, __builtin_vec_lvrx, __PPU__]
+  vbc __builtin_vec_lvrx (signed long, const vbc *);
+    LVRX  LVRX_VBC
+  vsc __builtin_vec_lvrx (signed long, const vsc *);
+    LVRX  LVRX_VSC
+  vsc __builtin_vec_lvrx (signed long, const signed char *);
+    LVRX  LVRX_SC
+  vuc __builtin_vec_lvrx (signed long, const vuc *);
+    LVRX  LVRX_VUC
+  vuc __builtin_vec_lvrx (signed long, const unsigned char *);
+    LVRX  LVRX_UC
+  vbs __builtin_vec_lvrx (signed long, const vbs *);
+    LVRX  LVRX_VBS
+  vss __builtin_vec_lvrx (signed long, const vss *);
+    LVRX  LVRX_VSS
+  vss __builtin_vec_lvrx (signed long, const signed short *);
+    LVRX  LVRX_SS
+  vus __builtin_vec_lvrx (signed long, const vus *);
+    LVRX  LVRX_VUS
+  vus __builtin_vec_lvrx (signed long, const unsigned short *);
+    LVRX  LVRX_US
+  vp __builtin_vec_lvrx (signed long, const vp *);
+    LVRX  LVRX_VP
+  vbi __builtin_vec_lvrx (signed long, const vbi *);
+    LVRX  LVRX_VBI
+  vsi __builtin_vec_lvrx (signed long, const vsi *);
+    LVRX  LVRX_VSI
+  vsi __builtin_vec_lvrx (signed long, const signed int *);
+    LVRX  LVRX_SI
+  vui __builtin_vec_lvrx (signed long, const vui *);
+    LVRX  LVRX_VUI
+  vui __builtin_vec_lvrx (signed long, const unsigned int *);
+    LVRX  LVRX_UI
+  vf __builtin_vec_lvrx (signed long, const vf *);
+    LVRX  LVRX_VF
+  vf __builtin_vec_lvrx (signed long, const float *);
+    LVRX  LVRX_F
+
+[VEC_LVRXL, vec_lvrxl, __builtin_vec_lvrxl, __PPU__]
+  vbc __builtin_vec_lvrxl (signed long, const vbc *);
+    LVRXL  LVRXL_VBC
+  vsc __builtin_vec_lvrxl (signed long, const vsc *);
+    LVRXL  LVRXL_VSC
+  vsc __builtin_vec_lvrxl (signed long, const signed char *);
+    LVRXL  LVRXL_SC
+  vuc __builtin_vec_lvrxl (signed long, const vuc *);
+    LVRXL  LVRXL_VUC
+  vuc __builtin_vec_lvrxl (signed long, const unsigned char *);
+    LVRXL  LVRXL_UC
+  vbs __builtin_vec_lvrxl (signed long, const vbs *);
+    LVRXL  LVRXL_VBS
+  vss __builtin_vec_lvrxl (signed long, const vss *);
+    LVRXL  LVRXL_VSS
+  vss __builtin_vec_lvrxl (signed long, const signed short *);
+    LVRXL  LVRXL_SS
+  vus __builtin_vec_lvrxl (signed long, const vus *);
+    LVRXL  LVRXL_VUS
+  vus __builtin_vec_lvrxl (signed long, const unsigned short *);
+    LVRXL  LVRXL_US
+  vp __builtin_vec_lvrxl (signed long, const vp *);
+    LVRXL  LVRXL_VP
+  vbi __builtin_vec_lvrxl (signed long, const vbi *);
+    LVRXL  LVRXL_VBI
+  vsi __builtin_vec_lvrxl (signed long, const vsi *);
+    LVRXL  LVRXL_VSI
+  vsi __builtin_vec_lvrxl (signed long, const signed int *);
+    LVRXL  LVRXL_SI
+  vui __builtin_vec_lvrxl (signed long, const vui *);
+    LVRXL  LVRXL_VUI
+  vui __builtin_vec_lvrxl (signed long, const unsigned int *);
+    LVRXL  LVRXL_UI
+  vf __builtin_vec_lvrxl (signed long, const vf *);
+    LVRXL  LVRXL_VF
+  vf __builtin_vec_lvrxl (signed long, const float *);
+    LVRXL  LVRXL_F
+
+[VEC_LVSL, vec_lvsl, __builtin_vec_lvsl]
+  vuc __builtin_vec_lvsl (signed long, const unsigned char *);
+    LVSL  LVSL_UC
+  vuc __builtin_vec_lvsl (signed long, const signed char *);
+    LVSL  LVSL_SC
+  vuc __builtin_vec_lvsl (signed long, const char *);
+    LVSL  LVSL_STR
+  vuc __builtin_vec_lvsl (signed long, const unsigned short *);
+    LVSL  LVSL_US
+  vuc __builtin_vec_lvsl (signed long, const signed short *);
+    LVSL  LVSL_SS
+  vuc __builtin_vec_lvsl (signed long, const unsigned int *);
+    LVSL  LVSL_UI
+  vuc __builtin_vec_lvsl (signed long, const signed int *);
+    LVSL  LVSL_SI
+  vuc __builtin_vec_lvsl (signed long, const unsigned long *);
+    LVSL  LVSL_UL
+  vuc __builtin_vec_lvsl (signed long, const signed long *);
+    LVSL  LVSL_SL
+  vuc __builtin_vec_lvsl (signed long, const unsigned long long *);
+    LVSL  LVSL_ULL
+  vuc __builtin_vec_lvsl (signed long, const signed long long *);
+    LVSL  LVSL_SLL
+  vuc __builtin_vec_lvsl (signed long, const float *);
+    LVSL  LVSL_F
+  vuc __builtin_vec_lvsl (signed long, const double *);
+    LVSL  LVSL_D
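+
+; Classic pre-VSX idiom (illustrative, not part of this table): lvsl
+; produces the permute control that realigns two aligned loads around a
+; possibly unaligned pointer:
+;
+;   const unsigned char *p;   /* possibly unaligned */
+;   vector unsigned char hi = vec_ld (0, p);
+;   vector unsigned char lo = vec_ld (15, p);
+;   vector unsigned char pcv = vec_lvsl (0, p);
+;   vector unsigned char v = vec_perm (hi, lo, pcv);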
+
+[VEC_LVSR, vec_lvsr, __builtin_vec_lvsr]
+  vuc __builtin_vec_lvsr (signed long, const unsigned char *);
+    LVSR  LVSR_UC
+  vuc __builtin_vec_lvsr (signed long, const signed char *);
+    LVSR  LVSR_SC
+  vuc __builtin_vec_lvsr (signed long, const char *);
+    LVSR  LVSR_STR
+  vuc __builtin_vec_lvsr (signed long, const unsigned short *);
+    LVSR  LVSR_US
+  vuc __builtin_vec_lvsr (signed long, const signed short *);
+    LVSR  LVSR_SS
+  vuc __builtin_vec_lvsr (signed long, const unsigned int *);
+    LVSR  LVSR_UI
+  vuc __builtin_vec_lvsr (signed long, const signed int *);
+    LVSR  LVSR_SI
+  vuc __builtin_vec_lvsr (signed long, const unsigned long *);
+    LVSR  LVSR_UL
+  vuc __builtin_vec_lvsr (signed long, const signed long *);
+    LVSR  LVSR_SL
+  vuc __builtin_vec_lvsr (signed long, const unsigned long long *);
+    LVSR  LVSR_ULL
+  vuc __builtin_vec_lvsr (signed long, const signed long long *);
+    LVSR  LVSR_SLL
+  vuc __builtin_vec_lvsr (signed long, const float *);
+    LVSR  LVSR_F
+  vuc __builtin_vec_lvsr (signed long, const double *);
+    LVSR  LVSR_D
+
+[VEC_LXVL, vec_xl_len, __builtin_vec_lxvl, _ARCH_PPC64_PWR9]
+  vsc __builtin_vec_lxvl (const signed char *, unsigned int);
+    LXVL  LXVL_VSC
+  vuc __builtin_vec_lxvl (const unsigned char *, unsigned int);
+    LXVL  LXVL_VUC
+  vss __builtin_vec_lxvl (const signed short *, unsigned int);
+    LXVL  LXVL_VSS
+  vus __builtin_vec_lxvl (const unsigned short *, unsigned int);
+    LXVL  LXVL_VUS
+  vsi __builtin_vec_lxvl (const signed int *, unsigned int);
+    LXVL  LXVL_VSI
+  vui __builtin_vec_lxvl (const unsigned int *, unsigned int);
+    LXVL  LXVL_VUI
+  vsll __builtin_vec_lxvl (const signed long long *, unsigned int);
+    LXVL  LXVL_VSLL
+  vull __builtin_vec_lxvl (const unsigned long long *, unsigned int);
+    LXVL  LXVL_VULL
+  vsq __builtin_vec_lxvl (const signed __int128 *, unsigned int);
+    LXVL  LXVL_VSQ
+  vuq __builtin_vec_lxvl (const unsigned __int128 *, unsigned int);
+    LXVL  LXVL_VUQ
+  vf __builtin_vec_lxvl (const float *, unsigned int);
+    LXVL  LXVL_VF
+  vd __builtin_vec_lxvl (const double *, unsigned int);
+    LXVL  LXVL_VD
+
+[VEC_MADD, vec_madd, __builtin_vec_madd]
+  vss __builtin_vec_madd (vss, vss, vss);
+    VMLADDUHM  VMLADDUHM_VSS
+  vss __builtin_vec_madd (vss, vus, vus);
+    VMLADDUHM  VMLADDUHM_VSSVUS
+  vss __builtin_vec_madd (vus, vss, vss);
+    VMLADDUHM  VMLADDUHM_VUSVSS
+  vus __builtin_vec_madd (vus, vus, vus);
+    VMLADDUHM  VMLADDUHM_VUS
+  vf __builtin_vec_madd (vf, vf, vf);
+    VMADDFP
+  vd __builtin_vec_madd (vd, vd, vd);
+    XVMADDDP
+
+[VEC_MADDS, vec_madds, __builtin_vec_madds]
+  vss __builtin_vec_madds (vss, vss, vss);
+    VMHADDSHS
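+
+; Illustration (not part of this table): vec_madd is an element-wise
+; multiply-add; the integer forms map to vmladduhm and the
+; floating-point forms to (x)vmadd:
+;
+;   vector float a, b, c;
+;   vector float r = vec_madd (a, b, c);   /* r[i] = a[i]*b[i] + c[i] */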
+
+[VEC_MAX, vec_max, __builtin_vec_max]
+  vsc __builtin_vec_max (vsc, vsc);
+    VMAXSB
+  vuc __builtin_vec_max (vuc, vuc);
+    VMAXUB
+  vss __builtin_vec_max (vss, vss);
+    VMAXSH
+  vus __builtin_vec_max (vus, vus);
+    VMAXUH
+  vsi __builtin_vec_max (vsi, vsi);
+    VMAXSW
+  vui __builtin_vec_max (vui, vui);
+    VMAXUW
+  vsll __builtin_vec_max (vsll, vsll);
+    VMAXSD
+  vull __builtin_vec_max (vull, vull);
+    VMAXUD
+  vf __builtin_vec_max (vf, vf);
+    VMAXFP
+  vd __builtin_vec_max (vd, vd);
+    XVMAXDP
+; The following variants are deprecated.
+  vsc __builtin_vec_max (vsc, vbc);
+    VMAXSB  VMAXSB_SB
+  vsc __builtin_vec_max (vbc, vsc);
+    VMAXSB  VMAXSB_BS
+  vuc __builtin_vec_max (vuc, vbc);
+    VMAXUB  VMAXUB_UB
+  vuc __builtin_vec_max (vbc, vuc);
+    VMAXUB  VMAXUB_BU
+  vss __builtin_vec_max (vss, vbs);
+    VMAXSH  VMAXSH_SB
+  vss __builtin_vec_max (vbs, vss);
+    VMAXSH  VMAXSH_BS
+  vus __builtin_vec_max (vus, vbs);
+    VMAXUH  VMAXUH_UB
+  vus __builtin_vec_max (vbs, vus);
+    VMAXUH  VMAXUH_BU
+  vsi __builtin_vec_max (vsi, vbi);
+    VMAXSW  VMAXSW_SB
+  vsi __builtin_vec_max (vbi, vsi);
+    VMAXSW  VMAXSW_BS
+  vui __builtin_vec_max (vui, vbi);
+    VMAXUW  VMAXUW_UB
+  vui __builtin_vec_max (vbi, vui);
+    VMAXUW  VMAXUW_BU
+  vsll __builtin_vec_max (vsll, vbll);
+    VMAXSD  VMAXSD_SB
+  vsll __builtin_vec_max (vbll, vsll);
+    VMAXSD  VMAXSD_BS
+  vull __builtin_vec_max (vull, vbll);
+    VMAXUD  VMAXUD_UB
+  vull __builtin_vec_max (vbll, vull);
+    VMAXUD  VMAXUD_BU
+
+[VEC_MERGEE, vec_mergee, __builtin_vec_vmrgew, _ARCH_PWR8]
+  vsi __builtin_vec_vmrgew (vsi, vsi);
+    VMRGEW_V4SI  VMRGEW_VSI
+  vui __builtin_vec_vmrgew (vui, vui);
+    VMRGEW_V4SI  VMRGEW_VUI
+  vbi __builtin_vec_vmrgew (vbi, vbi);
+    VMRGEW_V4SI  VMRGEW_VBI
+  vsll __builtin_vec_vmrgew (vsll, vsll);
+    VMRGEW_V2DI  VMRGEW_VSLL
+  vull __builtin_vec_vmrgew (vull, vull);
+    VMRGEW_V2DI  VMRGEW_VULL
+  vbll __builtin_vec_vmrgew (vbll, vbll);
+    VMRGEW_V2DI  VMRGEW_VBLL
+  vf __builtin_vec_vmrgew (vf, vf);
+    VMRGEW_V4SF
+  vd __builtin_vec_vmrgew (vd, vd);
+    VMRGEW_V2DF
+
+[VEC_MERGEH, vec_mergeh, __builtin_vec_mergeh]
+  vbc __builtin_vec_mergeh (vbc, vbc);
+    VMRGHB  VMRGHB_VBC
+  vsc __builtin_vec_mergeh (vsc, vsc);
+    VMRGHB  VMRGHB_VSC
+  vuc __builtin_vec_mergeh (vuc, vuc);
+    VMRGHB  VMRGHB_VUC
+  vbs __builtin_vec_mergeh (vbs, vbs);
+    VMRGHH  VMRGHH_VBS
+  vss __builtin_vec_mergeh (vss, vss);
+    VMRGHH  VMRGHH_VSS
+  vus __builtin_vec_mergeh (vus, vus);
+    VMRGHH  VMRGHH_VUS
+  vp __builtin_vec_mergeh (vp, vp);
+    VMRGHH  VMRGHH_VP
+  vbi __builtin_vec_mergeh (vbi, vbi);
+    VMRGHW  VMRGHW_VBI
+  vsi __builtin_vec_mergeh (vsi, vsi);
+    VMRGHW  VMRGHW_VSI
+  vui __builtin_vec_mergeh (vui, vui);
+    VMRGHW  VMRGHW_VUI
+  vbll __builtin_vec_mergeh (vbll, vbll);
+    VEC_MERGEH_V2DI  VEC_MERGEH_VBLL
+  vsll __builtin_vec_mergeh (vsll, vsll);
+    VEC_MERGEH_V2DI  VEC_MERGEH_VSLL
+  vull __builtin_vec_mergeh (vull, vull);
+    VEC_MERGEH_V2DI  VEC_MERGEH_VULL
+  vf __builtin_vec_mergeh (vf, vf);
+    VMRGHW  VMRGHW_VF
+  vd __builtin_vec_mergeh (vd, vd);
+    VEC_MERGEH_V2DF
+; The following variants are deprecated.
+  vsll __builtin_vec_mergeh (vsll, vbll);
+    VEC_MERGEH_V2DI  VEC_MERGEH_VSLL_VBLL
+  vsll __builtin_vec_mergeh (vbll, vsll);
+    VEC_MERGEH_V2DI  VEC_MERGEH_VBLL_VSLL
+  vull __builtin_vec_mergeh (vull, vbll);
+    VEC_MERGEH_V2DI  VEC_MERGEH_VULL_VBLL
+  vull __builtin_vec_mergeh (vbll, vull);
+    VEC_MERGEH_V2DI  VEC_MERGEH_VBLL_VULL
+
+[VEC_MERGEL, vec_mergel, __builtin_vec_mergel]
+  vbc __builtin_vec_mergel (vbc, vbc);
+    VMRGLB  VMRGLB_VBC
+  vsc __builtin_vec_mergel (vsc, vsc);
+    VMRGLB  VMRGLB_VSC
+  vuc __builtin_vec_mergel (vuc, vuc);
+    VMRGLB  VMRGLB_VUC
+  vbs __builtin_vec_mergel (vbs, vbs);
+    VMRGLH  VMRGLH_VBS
+  vss __builtin_vec_mergel (vss, vss);
+    VMRGLH  VMRGLH_VSS
+  vus __builtin_vec_mergel (vus, vus);
+    VMRGLH  VMRGLH_VUS
+  vp __builtin_vec_mergel (vp, vp);
+    VMRGLH  VMRGLH_VP
+  vbi __builtin_vec_mergel (vbi, vbi);
+    VMRGLW  VMRGLW_VBI
+  vsi __builtin_vec_mergel (vsi, vsi);
+    VMRGLW  VMRGLW_VSI
+  vui __builtin_vec_mergel (vui, vui);
+    VMRGLW  VMRGLW_VUI
+  vbll __builtin_vec_mergel (vbll, vbll);
+    VEC_MERGEL_V2DI  VEC_MERGEL_VBLL
+  vsll __builtin_vec_mergel (vsll, vsll);
+    VEC_MERGEL_V2DI  VEC_MERGEL_VSLL
+  vull __builtin_vec_mergel (vull, vull);
+    VEC_MERGEL_V2DI  VEC_MERGEL_VULL
+  vf __builtin_vec_mergel (vf, vf);
+    VMRGLW  VMRGLW_VF
+  vd __builtin_vec_mergel (vd, vd);
+    VEC_MERGEL_V2DF
+; The following variants are deprecated.
+  vsll __builtin_vec_mergel (vsll, vbll);
+    VEC_MERGEL_V2DI  VEC_MERGEL_VSLL_VBLL
+  vsll __builtin_vec_mergel (vbll, vsll);
+    VEC_MERGEL_V2DI  VEC_MERGEL_VBLL_VSLL
+  vull __builtin_vec_mergel (vull, vbll);
+    VEC_MERGEL_V2DI  VEC_MERGEL_VULL_VBLL
+  vull __builtin_vec_mergel (vbll, vull);
+    VEC_MERGEL_V2DI  VEC_MERGEL_VBLL_VULL
+
+[VEC_MERGEO, vec_mergeo, __builtin_vec_vmrgow, _ARCH_PWR8]
+  vsi __builtin_vec_vmrgow (vsi, vsi);
+    VMRGOW_V4SI  VMRGOW_VSI
+  vui __builtin_vec_vmrgow (vui, vui);
+    VMRGOW_V4SI  VMRGOW_VUI
+  vbi __builtin_vec_vmrgow (vbi, vbi);
+    VMRGOW_V4SI  VMRGOW_VBI
+  vsll __builtin_vec_vmrgow (vsll, vsll);
+    VMRGOW_V2DI  VMRGOW_VSLL
+  vull __builtin_vec_vmrgow (vull, vull);
+    VMRGOW_V2DI  VMRGOW_VULL
+  vbll __builtin_vec_vmrgow (vbll, vbll);
+    VMRGOW_V2DI  VMRGOW_VBLL
+  vf __builtin_vec_vmrgow (vf, vf);
+    VMRGOW_V4SF
+  vd __builtin_vec_vmrgow (vd, vd);
+    VMRGOW_V2DF
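+
+; Illustration (big-endian element order shown; values assumed): the
+; merge builtins interleave the high or low halves of their operands:
+;
+;   vector signed int a = {0, 1, 2, 3};
+;   vector signed int b = {4, 5, 6, 7};
+;   vector signed int h = vec_mergeh (a, b);   /* {0, 4, 1, 5} */
+;   vector signed int l = vec_mergel (a, b);   /* {2, 6, 3, 7} */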
+ vsc __builtin_vec_min (vsc, vbc); + VMINSB VMINSB_SB + vsc __builtin_vec_min (vbc, vsc); + VMINSB VMINSB_BS + vuc __builtin_vec_min (vuc, vbc); + VMINUB VMINUB_UB + vuc __builtin_vec_min (vbc, vuc); + VMINUB VMINUB_BU + vss __builtin_vec_min (vss, vbs); + VMINSH VMINSH_SB + vss __builtin_vec_min (vbs, vss); + VMINSH VMINSH_BS + vus __builtin_vec_min (vus, vbs); + VMINUH VMINUH_UB + vus __builtin_vec_min (vbs, vus); + VMINUH VMINUH_BU + vsi __builtin_vec_min (vsi, vbi); + VMINSW VMINSW_SB + vsi __builtin_vec_min (vbi, vsi); + VMINSW VMINSW_BS + vui __builtin_vec_min (vui, vbi); + VMINUW VMINUW_UB + vui __builtin_vec_min (vbi, vui); + VMINUW VMINUW_BU + vsll __builtin_vec_min (vsll, vbll); + VMINSD VMINSD_SB + vsll __builtin_vec_min (vbll, vsll); + VMINSD VMINSD_BS + vull __builtin_vec_min (vull, vbll); + VMINUD VMINUD_UB + vull __builtin_vec_min (vbll, vull); + VMINUD VMINUD_BU + +[VEC_MLADD, vec_mladd, __builtin_vec_mladd] + vss __builtin_vec_mladd (vss, vss, vss); + VMLADDUHM VMLADDUHM_VSS2 + vss __builtin_vec_mladd (vss, vus, vus); + VMLADDUHM VMLADDUHM_VSSVUS2 + vss __builtin_vec_mladd (vus, vss, vss); + VMLADDUHM VMLADDUHM_VUSVSS2 + vus __builtin_vec_mladd (vus, vus, vus); + VMLADDUHM VMLADDUHM_VUS2 + +[VEC_MOD, vec_mod, __builtin_vec_mod, _ARCH_PWR10] + vsi __builtin_vec_mod (vsi, vsi); + VMODSW + vui __builtin_vec_mod (vui, vui); + VMODUW + vsll __builtin_vec_mod (vsll, vsll); + VMODSD + vull __builtin_vec_mod (vull, vull); + VMODUD + vsq __builtin_vec_mod (vsq, vsq); + MODS_V1TI + vuq __builtin_vec_mod (vuq, vuq); + MODU_V1TI + +[VEC_MRADDS, vec_mradds, __builtin_vec_mradds] + vss __builtin_vec_mradds (vss, vss, vss); + VMHRADDSHS + +[VEC_MSUB, vec_msub, __builtin_vec_msub, __VSX__] + vf __builtin_vec_msub (vf, vf, vf); + XVMSUBSP + vd __builtin_vec_msub (vd, vd, vd); + XVMSUBDP + +[VEC_MSUM, vec_msum, __builtin_vec_msum] + vui __builtin_vec_msum (vuc, vuc, vui); + VMSUMUBM + vsi __builtin_vec_msum (vsc, vuc, vsi); + VMSUMMBM + vui __builtin_vec_msum (vus, vus, vui); + VMSUMUHM + vsi __builtin_vec_msum (vss, vss, vsi); + VMSUMSHM + vsq __builtin_vec_msum (vsll, vsll, vsq); + VMSUMUDM VMSUMUDM_S + vuq __builtin_vec_msum (vull, vull, vuq); + VMSUMUDM VMSUMUDM_U + +[VEC_MSUMS, vec_msums, __builtin_vec_msums] + vui __builtin_vec_msums (vus, vus, vui); + VMSUMUHS + vsi __builtin_vec_msums (vss, vss, vsi); + VMSUMSHS + +[VEC_MTVSCR, vec_mtvscr, __builtin_vec_mtvscr] + void __builtin_vec_mtvscr (vbc); + MTVSCR MTVSCR_VBC + void __builtin_vec_mtvscr (vsc); + MTVSCR MTVSCR_VSC + void __builtin_vec_mtvscr (vuc); + MTVSCR MTVSCR_VUC + void __builtin_vec_mtvscr (vbs); + MTVSCR MTVSCR_VBS + void __builtin_vec_mtvscr (vss); + MTVSCR MTVSCR_VSS + void __builtin_vec_mtvscr (vus); + MTVSCR MTVSCR_VUS + void __builtin_vec_mtvscr (vp); + MTVSCR MTVSCR_VP + void __builtin_vec_mtvscr (vbi); + MTVSCR MTVSCR_VBI + void __builtin_vec_mtvscr (vsi); + MTVSCR MTVSCR_VSI + void __builtin_vec_mtvscr (vui); + MTVSCR MTVSCR_VUI + +; Note that the entries for VEC_MUL are currently ignored. See rs6000-c.c: +; altivec_resolve_overloaded_builtin, where there is special-case code for +; VEC_MUL. TODO: Is this really necessary? Investigate. Seven missing +; prototypes here...no corresponding builtins. Also added "vmulld" in P10 +; which could be used instead of MUL_V2DI, conditionally? 
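As a hedged illustration of the note above (an editorial aside, not part of the patch): once the stanza that follows registers these overloads, user code written against <altivec.h> resolves through them. A minimal sketch, assuming a VSX-enabled Power target (e.g. gcc -mvsx):

    #include <altivec.h>

    /* vec_mul on float/double resolves to the XVMULSP/XVMULDP
       instances listed in the stanza below.  */
    vector float  mul_sp (vector float a,  vector float b)  { return vec_mul (a, b); }
    vector double mul_dp (vector double a, vector double b) { return vec_mul (a, b); }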
+[VEC_MUL, vec_mul, __builtin_vec_mul] + vsll __builtin_vec_mul (vsll, vsll); + MUL_V2DI + vf __builtin_vec_mul (vf, vf); + XVMULSP + vd __builtin_vec_mul (vd, vd); + XVMULDP + +[VEC_MULE, vec_mule, __builtin_vec_mule] + vss __builtin_vec_mule (vsc, vsc); + VMULESB + vus __builtin_vec_mule (vuc, vuc); + VMULEUB + vsi __builtin_vec_mule (vss, vss); + VMULESH + vui __builtin_vec_mule (vus, vus); + VMULEUH + vsll __builtin_vec_mule (vsi, vsi); + VMULESW + vull __builtin_vec_mule (vui, vui); + VMULEUW + vsq __builtin_vec_mule (vsll, vsll); + VMULESD + vuq __builtin_vec_mule (vull, vull); + VMULEUD + +[VEC_MULH, vec_mulh, __builtin_vec_mulh, _ARCH_PWR10] + vsi __builtin_vec_mulh (vsi, vsi); + VMULHSW + vui __builtin_vec_mulh (vui, vui); + VMULHUW + vsll __builtin_vec_mulh (vsll, vsll); + VMULHSD + vull __builtin_vec_mulh (vull, vull); + VMULHUD + +[VEC_MULO, vec_mulo, __builtin_vec_mulo] + vss __builtin_vec_mulo (vsc, vsc); + VMULOSB + vus __builtin_vec_mulo (vuc, vuc); + VMULOUB + vsi __builtin_vec_mulo (vss, vss); + VMULOSH + vui __builtin_vec_mulo (vus, vus); + VMULOUH + vsll __builtin_vec_mulo (vsi, vsi); + VMULOSW + vull __builtin_vec_mulo (vui, vui); + VMULOUW + vsq __builtin_vec_mulo (vsll, vsll); + VMULOSD + vuq __builtin_vec_mulo (vull, vull); + VMULOUD + +[VEC_NABS, vec_nabs, __builtin_vec_nabs] + vsc __builtin_vec_nabs (vsc); + NABS_V16QI + vss __builtin_vec_nabs (vss); + NABS_V8HI + vsi __builtin_vec_nabs (vsi); + NABS_V4SI + vsll __builtin_vec_nabs (vsll); + NABS_V2DI + vf __builtin_vec_nabs (vf); + NABS_V4SF + vd __builtin_vec_nabs (vd); + NABS_V2DF + +[VEC_NAND, vec_nand, __builtin_vec_nand, _ARCH_PWR8] + vsc __builtin_vec_nand (vsc, vsc); + NAND_V16QI + vuc __builtin_vec_nand (vuc, vuc); + NAND_V16QI_UNS NAND_VUC + vbc __builtin_vec_nand (vbc, vbc); + NAND_V16QI_UNS NAND_VBC + vss __builtin_vec_nand (vss, vss); + NAND_V8HI + vus __builtin_vec_nand (vus, vus); + NAND_V8HI_UNS NAND_VUS + vbs __builtin_vec_nand (vbs, vbs); + NAND_V8HI_UNS NAND_VBS + vsi __builtin_vec_nand (vsi, vsi); + NAND_V4SI + vui __builtin_vec_nand (vui, vui); + NAND_V4SI_UNS NAND_VUI + vbi __builtin_vec_nand (vbi, vbi); + NAND_V4SI_UNS NAND_VBI + vsll __builtin_vec_nand (vsll, vsll); + NAND_V2DI + vull __builtin_vec_nand (vull, vull); + NAND_V2DI_UNS NAND_VULL + vbll __builtin_vec_nand (vbll, vbll); + NAND_V2DI_UNS NAND_VBLL + vf __builtin_vec_nand (vf, vf); + NAND_V4SF + vd __builtin_vec_nand (vd, vd); + NAND_V2DF +; The following variants are deprecated. 
+ vsc __builtin_vec_nand (vbc, vsc); + NAND_V16QI NAND_VBC_VSC + vsc __builtin_vec_nand (vsc, vbc); + NAND_V16QI NAND_VSC_VBC + vuc __builtin_vec_nand (vbc, vuc); + NAND_V16QI_UNS NAND_VBC_VUC + vuc __builtin_vec_nand (vuc, vbc); + NAND_V16QI_UNS NAND_VUC_VBC + vss __builtin_vec_nand (vbs, vss); + NAND_V8HI NAND_VBS_VSS + vss __builtin_vec_nand (vss, vbs); + NAND_V8HI NAND_VSS_VBS + vus __builtin_vec_nand (vbs, vus); + NAND_V8HI_UNS NAND_VBS_VUS + vus __builtin_vec_nand (vus, vbs); + NAND_V8HI_UNS NAND_VUS_VBS + vsi __builtin_vec_nand (vbi, vsi); + NAND_V4SI NAND_VBI_VSI + vsi __builtin_vec_nand (vsi, vbi); + NAND_V4SI NAND_VSI_VBI + vui __builtin_vec_nand (vbi, vui); + NAND_V4SI_UNS NAND_VBI_VUI + vui __builtin_vec_nand (vui, vbi); + NAND_V4SI_UNS NAND_VUI_VBI + vsll __builtin_vec_nand (vbll, vsll); + NAND_V2DI NAND_VBLL_VSLL + vsll __builtin_vec_nand (vsll, vbll); + NAND_V2DI NAND_VSLL_VBLL + vull __builtin_vec_nand (vbll, vull); + NAND_V2DI_UNS NAND_VBLL_VULL + vull __builtin_vec_nand (vull, vbll); + NAND_V2DI_UNS NAND_VULL_VBLL + +[VEC_NCIPHER_BE, vec_ncipher_be, __builtin_vec_vncipher_be, _ARCH_PWR8] + vuc __builtin_vec_vncipher_be (vuc, vuc); + VNCIPHER_BE + +[VEC_NCIPHERLAST_BE, vec_ncipherlast_be, __builtin_vec_vncipherlast_be, _ARCH_PWR8] + vuc __builtin_vec_vncipherlast_be (vuc, vuc); + VNCIPHERLAST_BE + +[VEC_NEARBYINT, vec_nearbyint, __builtin_vec_nearbyint, __VSX__] + vf __builtin_vec_nearbyint (vf); + XVRSPI XVRSPI_NBI + vd __builtin_vec_nearbyint (vd); + XVRDPI XVRDPI_NBI + +[VEC_NEG, vec_neg, __builtin_vec_neg] + vsc __builtin_vec_neg (vsc); + NEG_V16QI + vss __builtin_vec_neg (vss); + NEG_V8HI + vsi __builtin_vec_neg (vsi); + NEG_V4SI + vsll __builtin_vec_neg (vsll); + NEG_V2DI + vf __builtin_vec_neg (vf); + NEG_V4SF + vd __builtin_vec_neg (vd); + NEG_V2DF + +[VEC_NMADD, vec_nmadd, __builtin_vec_nmadd, __VSX__] + vf __builtin_vec_nmadd (vf, vf, vf); + XVNMADDSP + vd __builtin_vec_nmadd (vd, vd, vd); + XVNMADDDP + +[VEC_NMSUB, vec_nmsub, __builtin_vec_nmsub] + vf __builtin_vec_nmsub (vf, vf, vf); + VNMSUBFP + vd __builtin_vec_nmsub (vd, vd, vd); + XVNMSUBDP + +[VEC_NOR, vec_nor, __builtin_vec_nor] + vsc __builtin_vec_nor (vsc, vsc); + VNOR_V16QI + vuc __builtin_vec_nor (vuc, vuc); + VNOR_V16QI_UNS VNOR_V16QI_U + vbc __builtin_vec_nor (vbc, vbc); + VNOR_V16QI_UNS VNOR_V16QI_B + vss __builtin_vec_nor (vss, vss); + VNOR_V8HI + vus __builtin_vec_nor (vus, vus); + VNOR_V8HI_UNS VNOR_V8HI_U + vbs __builtin_vec_nor (vbs, vbs); + VNOR_V8HI_UNS VNOR_V8HI_B + vsi __builtin_vec_nor (vsi, vsi); + VNOR_V4SI + vui __builtin_vec_nor (vui, vui); + VNOR_V4SI_UNS VNOR_V4SI_U + vbi __builtin_vec_nor (vbi, vbi); + VNOR_V4SI_UNS VNOR_V4SI_B + vsll __builtin_vec_nor (vsll, vsll); + VNOR_V2DI + vull __builtin_vec_nor (vull, vull); + VNOR_V2DI_UNS VNOR_V2DI_U + vbll __builtin_vec_nor (vbll, vbll); + VNOR_V2DI_UNS VNOR_V2DI_B + vsq __builtin_vec_nor (vsq, vsq); + VNOR_V1TI VNOR_V1TI_S + vuq __builtin_vec_nor (vuq, vuq); + VNOR_V1TI_UNS VNOR_V1TI_U + vf __builtin_vec_nor (vf, vf); + VNOR_V4SF + vd __builtin_vec_nor (vd, vd); + VNOR_V2DF +; The following variants are deprecated. 
+ vsll __builtin_vec_nor (vsll, vbll); + VNOR_V2DI VNOR_VSLL_VBLL + vsll __builtin_vec_nor (vbll, vsll); + VNOR_V2DI VNOR_VBLL_VSLL + vull __builtin_vec_nor (vull, vbll); + VNOR_V2DI_UNS VNOR_VULL_VBLL + vull __builtin_vec_nor (vbll, vull); + VNOR_V2DI_UNS VNOR_VBLL_VULL + vsq __builtin_vec_nor (vsq, vbq); + VNOR_V1TI VNOR_VSQ_VBQ + vsq __builtin_vec_nor (vbq, vsq); + VNOR_V1TI VNOR_VBQ_VSQ + vuq __builtin_vec_nor (vuq, vbq); + VNOR_V1TI_UNS VNOR_VUQ_VBQ + vuq __builtin_vec_nor (vbq, vuq); + VNOR_V1TI_UNS VNOR_VBQ_VUQ + +[VEC_OR, vec_or, __builtin_vec_or] + vsc __builtin_vec_or (vsc, vsc); + VOR_V16QI + vuc __builtin_vec_or (vuc, vuc); + VOR_V16QI_UNS VOR_V16QI_U + vbc __builtin_vec_or (vbc, vbc); + VOR_V16QI_UNS VOR_V16QI_B + vss __builtin_vec_or (vss, vss); + VOR_V8HI + vus __builtin_vec_or (vus, vus); + VOR_V8HI_UNS VOR_V8HI_U + vbs __builtin_vec_or (vbs, vbs); + VOR_V8HI_UNS VOR_V8HI_B + vsi __builtin_vec_or (vsi, vsi); + VOR_V4SI + vui __builtin_vec_or (vui, vui); + VOR_V4SI_UNS VOR_V4SI_U + vbi __builtin_vec_or (vbi, vbi); + VOR_V4SI_UNS VOR_V4SI_B + vsll __builtin_vec_or (vsll, vsll); + VOR_V2DI + vull __builtin_vec_or (vull, vull); + VOR_V2DI_UNS VOR_V2DI_U + vbll __builtin_vec_or (vbll, vbll); + VOR_V2DI_UNS VOR_V2DI_B + vf __builtin_vec_or (vf, vf); + VOR_V4SF + vd __builtin_vec_or (vd, vd); + VOR_V2DF +; The following variants are deprecated. + vsc __builtin_vec_or (vsc, vbc); + VOR_V16QI VOR_VSC_VBC + vsc __builtin_vec_or (vbc, vsc); + VOR_V16QI VOR_VBC_VSC + vuc __builtin_vec_or (vuc, vbc); + VOR_V16QI_UNS VOR_V16QI_UB + vuc __builtin_vec_or (vbc, vuc); + VOR_V16QI_UNS VOR_V16QI_BU + vss __builtin_vec_or (vss, vbs); + VOR_V8HI VOR_VSS_VBS + vss __builtin_vec_or (vbs, vss); + VOR_V8HI VOR_VBS_VSS + vus __builtin_vec_or (vus, vbs); + VOR_V8HI_UNS VOR_V8HI_UB + vus __builtin_vec_or (vbs, vus); + VOR_V8HI_UNS VOR_V8HI_BU + vsi __builtin_vec_or (vsi, vbi); + VOR_V4SI VOR_VSI_VBI + vsi __builtin_vec_or (vbi, vsi); + VOR_V4SI VOR_VBI_VSI + vui __builtin_vec_or (vui, vbi); + VOR_V4SI_UNS VOR_V4SI_UB + vui __builtin_vec_or (vbi, vui); + VOR_V4SI_UNS VOR_V4SI_BU + vsll __builtin_vec_or (vsll, vbll); + VOR_V2DI VOR_VSLL_VBLL + vsll __builtin_vec_or (vbll, vsll); + VOR_V2DI VOR_VBLL_VSLL + vull __builtin_vec_or (vull, vbll); + VOR_V2DI_UNS VOR_V2DI_UB + vull __builtin_vec_or (vbll, vull); + VOR_V2DI_UNS VOR_V2DI_BU + vf __builtin_vec_or (vf, vbi); + VOR_V4SF VOR_VF_VBI + vf __builtin_vec_or (vbi, vf); + VOR_V4SF VOR_VBI_VF + vd __builtin_vec_or (vd, vbll); + VOR_V2DF VOR_VD_VBLL + vd __builtin_vec_or (vbll, vd); + VOR_V2DF VOR_VBLL_VD + +[VEC_ORC, vec_orc, __builtin_vec_orc, _ARCH_PWR8] + vsc __builtin_vec_orc (vsc, vsc); + ORC_V16QI + vuc __builtin_vec_orc (vuc, vuc); + ORC_V16QI_UNS ORC_VUC + vbc __builtin_vec_orc (vbc, vbc); + ORC_V16QI_UNS ORC_VBC + vss __builtin_vec_orc (vss, vss); + ORC_V8HI + vus __builtin_vec_orc (vus, vus); + ORC_V8HI_UNS ORC_VUS + vbs __builtin_vec_orc (vbs, vbs); + ORC_V8HI_UNS ORC_VBS + vsi __builtin_vec_orc (vsi, vsi); + ORC_V4SI + vui __builtin_vec_orc (vui, vui); + ORC_V4SI_UNS ORC_VUI + vbi __builtin_vec_orc (vbi, vbi); + ORC_V4SI_UNS ORC_VBI + vsll __builtin_vec_orc (vsll, vsll); + ORC_V2DI + vull __builtin_vec_orc (vull, vull); + ORC_V2DI_UNS ORC_VULL + vbll __builtin_vec_orc (vbll, vbll); + ORC_V2DI_UNS ORC_VBLL + vf __builtin_vec_orc (vf, vf); + ORC_V4SF + vd __builtin_vec_orc (vd, vd); + ORC_V2DF +; The following variants are deprecated. 
+ vsc __builtin_vec_orc (vbc, vsc); + ORC_V16QI ORC_VBC_VSC + vsc __builtin_vec_orc (vsc, vbc); + ORC_V16QI ORC_VSC_VBC + vuc __builtin_vec_orc (vbc, vuc); + ORC_V16QI_UNS ORC_VBC_VUC + vuc __builtin_vec_orc (vuc, vbc); + ORC_V16QI_UNS ORC_VUC_VBC + vss __builtin_vec_orc (vbs, vss); + ORC_V8HI ORC_VBS_VSS + vss __builtin_vec_orc (vss, vbs); + ORC_V8HI ORC_VSS_VBS + vus __builtin_vec_orc (vbs, vus); + ORC_V8HI_UNS ORC_VBS_VUS + vus __builtin_vec_orc (vus, vbs); + ORC_V8HI_UNS ORC_VUS_VBS + vsi __builtin_vec_orc (vbi, vsi); + ORC_V4SI ORC_VBI_VSI + vsi __builtin_vec_orc (vsi, vbi); + ORC_V4SI ORC_VSI_VBI + vui __builtin_vec_orc (vbi, vui); + ORC_V4SI_UNS ORC_VBI_VUI + vui __builtin_vec_orc (vui, vbi); + ORC_V4SI_UNS ORC_VUI_VBI + vsll __builtin_vec_orc (vbll, vsll); + ORC_V2DI ORC_VBLL_VSLL + vsll __builtin_vec_orc (vsll, vbll); + ORC_V2DI ORC_VSLL_VBLL + vull __builtin_vec_orc (vbll, vull); + ORC_V2DI_UNS ORC_VBLL_VULL + vull __builtin_vec_orc (vull, vbll); + ORC_V2DI_UNS ORC_VULL_VBLL +
+[VEC_PACK, vec_pack, __builtin_vec_pack] + vsc __builtin_vec_pack (vss, vss); + VPKUHUM VPKUHUM_VSS + vuc __builtin_vec_pack (vus, vus); + VPKUHUM VPKUHUM_VUS + vbc __builtin_vec_pack (vbs, vbs); + VPKUHUM VPKUHUM_VBS + vss __builtin_vec_pack (vsi, vsi); + VPKUWUM VPKUWUM_VSI + vus __builtin_vec_pack (vui, vui); + VPKUWUM VPKUWUM_VUI + vbs __builtin_vec_pack (vbi, vbi); + VPKUWUM VPKUWUM_VBI + vsi __builtin_vec_pack (vsll, vsll); + VPKUDUM VPKUDUM_VSLL + vui __builtin_vec_pack (vull, vull); + VPKUDUM VPKUDUM_VULL + vbi __builtin_vec_pack (vbll, vbll); + VPKUDUM VPKUDUM_VBLL + vf __builtin_vec_pack (vd, vd); + FLOAT2_V2DF FLOAT2_V2DF_PACK +
+[VEC_PACKPX, vec_packpx, __builtin_vec_packpx] + vp __builtin_vec_packpx (vui, vui); + VPKPX +
+[VEC_PACKS, vec_packs, __builtin_vec_packs] + vuc __builtin_vec_packs (vus, vus); + VPKUHUS VPKUHUS_S + vsc __builtin_vec_packs (vss, vss); + VPKSHSS + vus __builtin_vec_packs (vui, vui); + VPKUWUS VPKUWUS_S + vss __builtin_vec_packs (vsi, vsi); + VPKSWSS + vui __builtin_vec_packs (vull, vull); + VPKUDUS VPKUDUS_S + vsi __builtin_vec_packs (vsll, vsll); + VPKSDSS +
+[VEC_PACKSU, vec_packsu, __builtin_vec_packsu] + vuc __builtin_vec_packsu (vus, vus); + VPKUHUS VPKUHUS_U + vuc __builtin_vec_packsu (vss, vss); + VPKSHUS + vus __builtin_vec_packsu (vui, vui); + VPKUWUS VPKUWUS_U + vus __builtin_vec_packsu (vsi, vsi); + VPKSWUS + vui __builtin_vec_packsu (vull, vull); + VPKUDUS VPKUDUS_U + vui __builtin_vec_packsu (vsll, vsll); + VPKSDUS +
+[VEC_PDEP, vec_pdep, __builtin_vec_vpdepd, _ARCH_PWR10] + vull __builtin_vec_vpdepd (vull, vull); + VPDEPD +
+[VEC_PERM, vec_perm, __builtin_vec_perm] + vsc __builtin_vec_perm (vsc, vsc, vuc); + VPERM_16QI + vuc __builtin_vec_perm (vuc, vuc, vuc); + VPERM_16QI_UNS VPERM_16QI_VUC + vbc __builtin_vec_perm (vbc, vbc, vuc); + VPERM_16QI_UNS VPERM_16QI_VBC + vss __builtin_vec_perm (vss, vss, vuc); + VPERM_8HI + vus __builtin_vec_perm (vus, vus, vuc); + VPERM_8HI_UNS VPERM_8HI_VUS + vbs __builtin_vec_perm (vbs, vbs, vuc); + VPERM_8HI_UNS VPERM_8HI_VBS + vp __builtin_vec_perm (vp, vp, vuc); + VPERM_8HI_UNS VPERM_8HI_VP + vsi __builtin_vec_perm (vsi, vsi, vuc); + VPERM_4SI + vui __builtin_vec_perm (vui, vui, vuc); + VPERM_4SI_UNS VPERM_4SI_VUI + vbi __builtin_vec_perm (vbi, vbi, vuc); + VPERM_4SI_UNS VPERM_4SI_VBI + vsll __builtin_vec_perm (vsll, vsll, vuc); + VPERM_2DI + vull __builtin_vec_perm (vull, vull, vuc); + VPERM_2DI_UNS VPERM_2DI_VULL + vbll __builtin_vec_perm (vbll, vbll, vuc); + VPERM_2DI_UNS VPERM_2DI_VBLL + vf __builtin_vec_perm (vf, vf, vuc); + VPERM_4SF + vd __builtin_vec_perm (vd, vd, vuc); + VPERM_2DF + vsq __builtin_vec_perm (vsq, vsq, vuc); + VPERM_1TI + vuq __builtin_vec_perm (vuq, vuq, vuc); + VPERM_1TI_UNS +; The following variants are deprecated. + vsc __builtin_vec_perm (vsc, vuc, vuc); + VPERM_16QI VPERM_VSC_VUC_VUC + vbc __builtin_vec_perm (vbc, vbc, vbc); + VPERM_16QI VPERM_VBC_VBC_VBC +
+[VEC_PERMX, vec_permx, __builtin_vec_xxpermx, _ARCH_PWR10] + vsc __builtin_vec_xxpermx (vsc, vsc, vuc, const int); + XXPERMX_UV2DI XXPERMX_VSC + vuc __builtin_vec_xxpermx (vuc, vuc, vuc, const int); + XXPERMX_UV2DI XXPERMX_VUC + vss __builtin_vec_xxpermx (vss, vss, vuc, const int); + XXPERMX_UV2DI XXPERMX_VSS + vus __builtin_vec_xxpermx (vus, vus, vuc, const int); + XXPERMX_UV2DI XXPERMX_VUS + vsi __builtin_vec_xxpermx (vsi, vsi, vuc, const int); + XXPERMX_UV2DI XXPERMX_VSI + vui __builtin_vec_xxpermx (vui, vui, vuc, const int); + XXPERMX_UV2DI XXPERMX_VUI + vsll __builtin_vec_xxpermx (vsll, vsll, vuc, const int); + XXPERMX_UV2DI XXPERMX_VSLL + vull __builtin_vec_xxpermx (vull, vull, vuc, const int); + XXPERMX_UV2DI XXPERMX_VULL + vf __builtin_vec_xxpermx (vf, vf, vuc, const int); + XXPERMX_UV2DI XXPERMX_VF + vd __builtin_vec_xxpermx (vd, vd, vuc, const int); + XXPERMX_UV2DI XXPERMX_VD +
+[VEC_PERMXOR, vec_permxor, __builtin_vec_vpermxor] + vsc __builtin_vec_vpermxor (vsc, vsc, vsc); + VPERMXOR VPERMXOR_VSC + vuc __builtin_vec_vpermxor (vuc, vuc, vuc); + VPERMXOR VPERMXOR_VUC + vbc __builtin_vec_vpermxor (vbc, vbc, vbc); + VPERMXOR VPERMXOR_VBC +
+[VEC_PEXT, vec_pext, __builtin_vec_vpextd, _ARCH_PWR10] + vull __builtin_vec_vpextd (vull, vull); + VPEXTD +
+[VEC_PMSUM, vec_pmsum_be, __builtin_vec_vpmsum] + vus __builtin_vec_vpmsum (vuc, vuc); + VPMSUMB VPMSUMB_V + vui __builtin_vec_vpmsum (vus, vus); + VPMSUMH VPMSUMH_V + vull __builtin_vec_vpmsum (vui, vui); + VPMSUMW VPMSUMW_V + vuq __builtin_vec_vpmsum (vull, vull); + VPMSUMD VPMSUMD_V +
+[VEC_POPCNT, vec_popcnt, __builtin_vec_vpopcntu, _ARCH_PWR8] + vuc __builtin_vec_vpopcntu (vsc); + VPOPCNTB + vuc __builtin_vec_vpopcntu (vuc); + VPOPCNTUB + vus __builtin_vec_vpopcntu (vss); + VPOPCNTH + vus __builtin_vec_vpopcntu (vus); + VPOPCNTUH + vui __builtin_vec_vpopcntu (vsi); + VPOPCNTW + vui __builtin_vec_vpopcntu (vui); + VPOPCNTUW + vull __builtin_vec_vpopcntu (vsll); + VPOPCNTD + vull __builtin_vec_vpopcntu (vull); + VPOPCNTUD +
+[VEC_PARITY_LSBB, vec_parity_lsbb, __builtin_vec_vparity_lsbb, _ARCH_PWR9] + vui __builtin_vec_vparity_lsbb (vsi); + VPRTYBW VPRTYBW_S + vui __builtin_vec_vparity_lsbb (vui); + VPRTYBW VPRTYBW_U + vull __builtin_vec_vparity_lsbb (vsll); + VPRTYBD VPRTYBD_S + vull __builtin_vec_vparity_lsbb (vull); + VPRTYBD VPRTYBD_U + vuq __builtin_vec_vparity_lsbb (vsq); + VPRTYBQ VPRTYBQ_S + vuq __builtin_vec_vparity_lsbb (vuq); + VPRTYBQ VPRTYBQ_U +
+; There are no actual builtins for vec_promote. There is special handling for +; this in altivec_resolve_overloaded_builtin in rs6000-c.c, where the call +; is replaced by a constructor. The single overload here causes +; __builtin_vec_promote to be registered with the front end so that can happen.
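A hedged sketch of what the constructor replacement described in the note above means for user code, before the stanza that follows (illustration only, not from the patch):

    #include <altivec.h>

    vector signed int promote0 (signed int x)
    {
      /* The front end rewrites this call into a vector constructor;
         element 0 holds x and the remaining elements are unspecified.  */
      return vec_promote (x, 0);
    }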
+[VEC_PROMOTE, vec_promote, __builtin_vec_promote] + vsi __builtin_vec_promote (vsi); + ABS_V4SI PROMOTE_FAKERY + +[VEC_RE, vec_re, __builtin_vec_re] + vf __builtin_vec_re (vf); + VREFP + vd __builtin_vec_re (vd); + XVREDP + +[VEC_RECIP, vec_recipdiv, __builtin_vec_recipdiv] + vf __builtin_vec_recipdiv (vf, vf); + RECIP_V4SF + vd __builtin_vec_recipdiv (vd, vd); + RECIP_V2DF + +[VEC_REPLACE_ELT, vec_replace_elt, __builtin_vec_replace_elt, _ARCH_PWR10] + vui __builtin_vec_replace_elt (vui, unsigned int, const int); + VREPLACE_ELT_UV4SI + vsi __builtin_vec_replace_elt (vsi, signed int, const int); + VREPLACE_ELT_V4SI + vull __builtin_vec_replace_elt (vull, unsigned long long, const int); + VREPLACE_ELT_UV2DI + vsll __builtin_vec_replace_elt (vsll, signed long long, const int); + VREPLACE_ELT_V2DI + vf __builtin_vec_replace_elt (vf, float, const int); + VREPLACE_ELT_V4SF + vd __builtin_vec_replace_elt (vd, double, const int); + VREPLACE_ELT_V2DF + +[VEC_REPLACE_UN, vec_replace_unaligned, __builtin_vec_replace_un, _ARCH_PWR10] + vui __builtin_vec_replace_un (vui, unsigned int, const int); + VREPLACE_UN_UV4SI + vsi __builtin_vec_replace_un (vsi, signed int, const int); + VREPLACE_UN_V4SI + vull __builtin_vec_replace_un (vull, unsigned long long, const int); + VREPLACE_UN_UV2DI + vsll __builtin_vec_replace_un (vsll, signed long long, const int); + VREPLACE_UN_V2DI + vf __builtin_vec_replace_un (vf, float, const int); + VREPLACE_UN_V4SF + vd __builtin_vec_replace_un (vd, double, const int); + VREPLACE_UN_V2DF + +[VEC_REVB, vec_revb, __builtin_vec_revb, _ARCH_PWR8] + vss __builtin_vec_revb (vss); + REVB_V8HI REVB_VSS + vus __builtin_vec_revb (vus); + REVB_V8HI REVB_VUS + vsi __builtin_vec_revb (vsi); + REVB_V4SI REVB_VSI + vui __builtin_vec_revb (vui); + REVB_V4SI REVB_VUI + vsll __builtin_vec_revb (vsll); + REVB_V2DI REVB_VSLL + vull __builtin_vec_revb (vull); + REVB_V2DI REVB_VULL + vsq __builtin_vec_revb (vsq); + REVB_V1TI REVB_VSQ + vuq __builtin_vec_revb (vuq); + REVB_V1TI REVB_VUQ + vf __builtin_vec_revb (vf); + REVB_V4SF + vd __builtin_vec_revb (vd); + REVB_V2DF +; The following variants are deprecated. 
+ vsc __builtin_vec_revb (vsc); + REVB_V16QI REVB_VSC + vuc __builtin_vec_revb (vuc); + REVB_V16QI REVB_VUC + vbc __builtin_vec_revb (vbc); + REVB_V16QI REVB_VBC + vbs __builtin_vec_revb (vbs); + REVB_V8HI REVB_VBS + vbi __builtin_vec_revb (vbi); + REVB_V4SI REVB_VBI + vbll __builtin_vec_revb (vbll); + REVB_V2DI REVB_VBLL +
+[VEC_REVE, vec_reve, __builtin_vec_vreve] + vsc __builtin_vec_vreve (vsc); + VREVE_V16QI VREVE_VSC + vuc __builtin_vec_vreve (vuc); + VREVE_V16QI VREVE_VUC + vbc __builtin_vec_vreve (vbc); + VREVE_V16QI VREVE_VBC + vss __builtin_vec_vreve (vss); + VREVE_V8HI VREVE_VSS + vus __builtin_vec_vreve (vus); + VREVE_V8HI VREVE_VUS + vbs __builtin_vec_vreve (vbs); + VREVE_V8HI VREVE_VBS + vsi __builtin_vec_vreve (vsi); + VREVE_V4SI VREVE_VSI + vui __builtin_vec_vreve (vui); + VREVE_V4SI VREVE_VUI + vbi __builtin_vec_vreve (vbi); + VREVE_V4SI VREVE_VBI + vsll __builtin_vec_vreve (vsll); + VREVE_V2DI VREVE_VSLL + vull __builtin_vec_vreve (vull); + VREVE_V2DI VREVE_VULL + vbll __builtin_vec_vreve (vbll); + VREVE_V2DI VREVE_VBLL + vf __builtin_vec_vreve (vf); + VREVE_V4SF + vd __builtin_vec_vreve (vd); + VREVE_V2DF +
+[VEC_RINT, vec_rint, __builtin_vec_rint, __VSX__] + vf __builtin_vec_rint (vf); + XVRSPIC + vd __builtin_vec_rint (vd); + XVRDPIC +
+[VEC_RL, vec_rl, __builtin_vec_rl] + vsc __builtin_vec_rl (vsc, vuc); + VRLB VRLB_VSC + vuc __builtin_vec_rl (vuc, vuc); + VRLB VRLB_VUC + vss __builtin_vec_rl (vss, vus); + VRLH VRLH_VSS + vus __builtin_vec_rl (vus, vus); + VRLH VRLH_VUS + vsi __builtin_vec_rl (vsi, vui); + VRLW VRLW_VSI + vui __builtin_vec_rl (vui, vui); + VRLW VRLW_VUI + vsll __builtin_vec_rl (vsll, vull); + VRLD VRLD_VSLL + vull __builtin_vec_rl (vull, vull); + VRLD VRLD_VULL + vsq __builtin_vec_rl (vsq, vuq); + VRLQ VRLQ_VSQ + vuq __builtin_vec_rl (vuq, vuq); + VRLQ VRLQ_VUQ +
+[VEC_RLMI, vec_rlmi, __builtin_vec_rlmi, _ARCH_PWR9] + vui __builtin_vec_rlmi (vui, vui, vui); + VRLWMI + vull __builtin_vec_rlmi (vull, vull, vull); + VRLDMI + vsq __builtin_vec_rlmi (vsq, vsq, vuq); + VRLQMI VRLQMI_VSQ + vuq __builtin_vec_rlmi (vuq, vuq, vuq); + VRLQMI VRLQMI_VUQ +
+[VEC_RLNM, vec_vrlnm, __builtin_vec_rlnm, _ARCH_PWR9] + vui __builtin_vec_rlnm (vui, vui); + VRLWNM + vull __builtin_vec_rlnm (vull, vull); + VRLDNM + vsq __builtin_vec_rlnm (vsq, vuq); + VRLQNM VRLQNM_VSQ + vuq __builtin_vec_rlnm (vuq, vuq); + VRLQNM VRLQNM_VUQ +
+[VEC_ROUND, vec_round, __builtin_vec_round] + vf __builtin_vec_round (vf); + VRFIN + vd __builtin_vec_round (vd); + XVRDPI +
+[VEC_RSQRT, vec_rsqrt, __builtin_vec_rsqrt] + vf __builtin_vec_rsqrt (vf); + RSQRT_4SF + vd __builtin_vec_rsqrt (vd); + RSQRT_2DF +
+[VEC_RSQRTE, vec_rsqrte, __builtin_vec_rsqrte] + vf __builtin_vec_rsqrte (vf); + VRSQRTEFP + vd __builtin_vec_rsqrte (vd); + XVRSQRTEDP +
+[VEC_SBOX_BE, vec_sbox_be, __builtin_vec_sbox_be, _ARCH_PWR8] + vuc __builtin_vec_sbox_be (vuc); + VSBOX_BE +
+[VEC_SEL, vec_sel, __builtin_vec_sel] + vsc __builtin_vec_sel (vsc, vsc, vbc); + VSEL_16QI VSEL_16QI_B + vsc __builtin_vec_sel (vsc, vsc, vuc); + VSEL_16QI VSEL_16QI_U + vuc __builtin_vec_sel (vuc, vuc, vbc); + VSEL_16QI_UNS VSEL_16QI_UB + vuc __builtin_vec_sel (vuc, vuc, vuc); + VSEL_16QI_UNS VSEL_16QI_UU + vbc __builtin_vec_sel (vbc, vbc, vbc); + VSEL_16QI_UNS VSEL_16QI_BB + vbc __builtin_vec_sel (vbc, vbc, vuc); + VSEL_16QI_UNS VSEL_16QI_BU + vss __builtin_vec_sel (vss, vss, vbs); + VSEL_8HI VSEL_8HI_B + vss __builtin_vec_sel (vss, vss, vus); + VSEL_8HI VSEL_8HI_U + vus __builtin_vec_sel (vus, vus, vbs); + VSEL_8HI_UNS VSEL_8HI_UB + vus __builtin_vec_sel (vus, vus, vus); + VSEL_8HI_UNS VSEL_8HI_UU + vbs __builtin_vec_sel (vbs, vbs, vbs); + VSEL_8HI_UNS VSEL_8HI_BB + vbs __builtin_vec_sel (vbs, vbs, vus); + VSEL_8HI_UNS VSEL_8HI_BU + vsi __builtin_vec_sel (vsi, vsi, vbi); + VSEL_4SI VSEL_4SI_B + vsi __builtin_vec_sel (vsi, vsi, vui); + VSEL_4SI VSEL_4SI_U + vui __builtin_vec_sel (vui, vui, vbi); + VSEL_4SI_UNS VSEL_4SI_UB + vui __builtin_vec_sel (vui, vui, vui); + VSEL_4SI_UNS VSEL_4SI_UU + vbi __builtin_vec_sel (vbi, vbi, vbi); + VSEL_4SI_UNS VSEL_4SI_BB + vbi __builtin_vec_sel (vbi, vbi, vui); + VSEL_4SI_UNS VSEL_4SI_BU + vsll __builtin_vec_sel (vsll, vsll, vbll); + VSEL_2DI_B VSEL_2DI_B + vsll __builtin_vec_sel (vsll, vsll, vull); + VSEL_2DI_B VSEL_2DI_U + vull __builtin_vec_sel (vull, vull, vbll); + VSEL_2DI_UNS VSEL_2DI_UB + vull __builtin_vec_sel (vull, vull, vull); + VSEL_2DI_UNS VSEL_2DI_UU + vbll __builtin_vec_sel (vbll, vbll, vbll); + VSEL_2DI_UNS VSEL_2DI_BB + vbll __builtin_vec_sel (vbll, vbll, vull); + VSEL_2DI_UNS VSEL_2DI_BU + vf __builtin_vec_sel (vf, vf, vbi); + VSEL_4SF VSEL_4SF_B + vf __builtin_vec_sel (vf, vf, vui); + VSEL_4SF VSEL_4SF_U + vd __builtin_vec_sel (vd, vd, vbll); + VSEL_2DF VSEL_2DF_B + vd __builtin_vec_sel (vd, vd, vull); + VSEL_2DF VSEL_2DF_U +; The following variants are deprecated. + vsll __builtin_vec_sel (vsll, vsll, vsll); + VSEL_2DI_B VSEL_2DI_S + vull __builtin_vec_sel (vull, vull, vsll); + VSEL_2DI_UNS VSEL_2DI_US + vf __builtin_vec_sel (vf, vf, vf); + VSEL_4SF VSEL_4SF_F + vf __builtin_vec_sel (vf, vf, vsi); + VSEL_4SF VSEL_4SF_S + vd __builtin_vec_sel (vd, vd, vsll); + VSEL_2DF VSEL_2DF_S + vd __builtin_vec_sel (vd, vd, vd); + VSEL_2DF VSEL_2DF_D +
+[VEC_SHASIGMA_BE, vec_shasigma_be, __builtin_crypto_vshasigma] + vui __builtin_crypto_vshasigma (vui, const int, const int); + VSHASIGMAW + vull __builtin_crypto_vshasigma (vull, const int, const int); + VSHASIGMAD +
+[VEC_SIGNED, vec_signed, __builtin_vec_vsigned] + vsi __builtin_vec_vsigned (vf); + VEC_VSIGNED_V4SF + vsll __builtin_vec_vsigned (vd); + VEC_VSIGNED_V2DF +
+[VEC_SIGNED2, vec_signed2, __builtin_vec_vsigned2] + vsi __builtin_vec_vsigned2 (vd, vd); + VEC_VSIGNED2_V2DF +
+[VEC_SIGNEDE, vec_signede, __builtin_vec_vsignede] + vsi __builtin_vec_vsignede (vd); + VEC_VSIGNEDE_V2DF +
+[VEC_SIGNEDO, vec_signedo, __builtin_vec_vsignedo] + vsi __builtin_vec_vsignedo (vd); + VEC_VSIGNEDO_V2DF +
+[VEC_SIGNEXTI, vec_signexti, __builtin_vec_signexti, _ARCH_PWR9] + vsi __builtin_vec_signexti (vsc); + VSIGNEXTSB2W + vsi __builtin_vec_signexti (vss); + VSIGNEXTSH2W +
+[VEC_SIGNEXTLL, vec_signextll, __builtin_vec_signextll, _ARCH_PWR9] + vsll __builtin_vec_signextll (vsc); + VSIGNEXTSB2D + vsll __builtin_vec_signextll (vss); + VSIGNEXTSH2D + vsll __builtin_vec_signextll (vsi); + VSIGNEXTSW2D +
+[VEC_SIGNEXTQ, vec_signextq, __builtin_vec_signextq, _ARCH_PWR10] + vsq __builtin_vec_signextq (vsll); + VSIGNEXTSD2Q +
+[VEC_SL, vec_sl, __builtin_vec_sl] + vsc __builtin_vec_sl (vsc, vuc); + VSLB VSLB_VSC + vuc __builtin_vec_sl (vuc, vuc); + VSLB VSLB_VUC + vss __builtin_vec_sl (vss, vus); + VSLH VSLH_VSS + vus __builtin_vec_sl (vus, vus); + VSLH VSLH_VUS + vsi __builtin_vec_sl (vsi, vui); + VSLW VSLW_VSI + vui __builtin_vec_sl (vui, vui); + VSLW VSLW_VUI + vsll __builtin_vec_sl (vsll, vull); + VSLD VSLD_VSLL + vull __builtin_vec_sl (vull, vull); + VSLD VSLD_VULL + vsq __builtin_vec_sl (vsq, vuq); + VSLQ VSLQ_VSQ + vuq __builtin_vec_sl (vuq, vuq); + VSLQ VSLQ_VUQ +
+[VEC_SLD, vec_sld, __builtin_vec_sld] + vsc __builtin_vec_sld (vsc, vsc, const int); + VSLDOI_16QI VSLDOI_VSC + vbc __builtin_vec_sld (vbc, vbc, const int); + VSLDOI_16QI VSLDOI_VBC + vuc __builtin_vec_sld (vuc, vuc, const int); + VSLDOI_16QI VSLDOI_VUC + vss __builtin_vec_sld (vss, vss, const int); + VSLDOI_8HI VSLDOI_VSS + vbs __builtin_vec_sld (vbs, vbs, const int); + VSLDOI_8HI VSLDOI_VBS + vus __builtin_vec_sld (vus, vus, const int); + VSLDOI_8HI VSLDOI_VUS + vp __builtin_vec_sld (vp, vp, const int); + VSLDOI_8HI VSLDOI_VP + vsi __builtin_vec_sld (vsi, vsi, const int); + VSLDOI_4SI VSLDOI_VSI + vbi __builtin_vec_sld (vbi, vbi, const int); + VSLDOI_4SI VSLDOI_VBI + vui __builtin_vec_sld (vui, vui, const int); + VSLDOI_4SI VSLDOI_VUI + vsll __builtin_vec_sld (vsll, vsll, const int); + VSLDOI_2DI VSLDOI_VSLL + vbll __builtin_vec_sld (vbll, vbll, const int); + VSLDOI_2DI VSLDOI_VBLL + vull __builtin_vec_sld (vull, vull, const int); + VSLDOI_2DI VSLDOI_VULL + vf __builtin_vec_sld (vf, vf, const int); + VSLDOI_4SF + vd __builtin_vec_sld (vd, vd, const int); + VSLDOI_2DF +
+[VEC_SLDB, vec_sldb, __builtin_vec_sldb, _ARCH_PWR10] + vsc __builtin_vec_sldb (vsc, vsc, const int); + VSLDB_V16QI VSLDB_VSC + vuc __builtin_vec_sldb (vuc, vuc, const int); + VSLDB_V16QI VSLDB_VUC + vss __builtin_vec_sldb (vss, vss, const int); + VSLDB_V8HI VSLDB_VSS + vus __builtin_vec_sldb (vus, vus, const int); + VSLDB_V8HI VSLDB_VUS + vsi __builtin_vec_sldb (vsi, vsi, const int); + VSLDB_V4SI VSLDB_VSI + vui __builtin_vec_sldb (vui, vui, const int); + VSLDB_V4SI VSLDB_VUI + vsll __builtin_vec_sldb (vsll, vsll, const int); + VSLDB_V2DI VSLDB_VSLL + vull __builtin_vec_sldb (vull, vull, const int); + VSLDB_V2DI VSLDB_VULL +
+[VEC_SLDW, vec_sldw, __builtin_vec_sldw] + vsc __builtin_vec_sldw (vsc, vsc, const int); + XXSLDWI_16QI XXSLDWI_VSC + vuc __builtin_vec_sldw (vuc, vuc, const int); + XXSLDWI_16QI XXSLDWI_VUC + vss __builtin_vec_sldw (vss, vss, const int); + XXSLDWI_8HI XXSLDWI_VSS + vus __builtin_vec_sldw (vus, vus, const int); + XXSLDWI_8HI XXSLDWI_VUS + vsi __builtin_vec_sldw (vsi, vsi, const int); + XXSLDWI_4SI XXSLDWI_VSI + vui __builtin_vec_sldw (vui, vui, const int); + XXSLDWI_4SI XXSLDWI_VUI + vsll __builtin_vec_sldw (vsll, vsll, const int); + XXSLDWI_2DI XXSLDWI_VSLL + vull __builtin_vec_sldw (vull, vull, const int); + XXSLDWI_2DI XXSLDWI_VULL +
+[VEC_SLL, vec_sll, __builtin_vec_sll] + vsc __builtin_vec_sll (vsc, vuc); + VSL VSL_VSC + vuc __builtin_vec_sll (vuc, vuc); + VSL VSL_VUC + vss __builtin_vec_sll (vss, vuc); + VSL VSL_VSS + vus __builtin_vec_sll (vus, vuc); + VSL VSL_VUS + vp __builtin_vec_sll (vp, vuc); + VSL VSL_VP + vsi __builtin_vec_sll (vsi, vuc); + VSL VSL_VSI + vui __builtin_vec_sll (vui, vuc); + VSL VSL_VUI + vsll __builtin_vec_sll (vsll, vuc); + VSL VSL_VSLL + vull __builtin_vec_sll (vull, vuc); + VSL VSL_VULL +; The following variants are deprecated.
+ vsc __builtin_vec_sll (vsc, vus); + VSL VSL_VSC_VUS + vsc __builtin_vec_sll (vsc, vui); + VSL VSL_VSC_VUI + vuc __builtin_vec_sll (vuc, vus); + VSL VSL_VUC_VUS + vuc __builtin_vec_sll (vuc, vui); + VSL VSL_VUC_VUI + vbc __builtin_vec_sll (vbc, vuc); + VSL VSL_VBC_VUC + vbc __builtin_vec_sll (vbc, vus); + VSL VSL_VBC_VUS + vbc __builtin_vec_sll (vbc, vui); + VSL VSL_VBC_VUI + vss __builtin_vec_sll (vss, vus); + VSL VSL_VSS_VUS + vss __builtin_vec_sll (vss, vui); + VSL VSL_VSS_VUI + vus __builtin_vec_sll (vus, vus); + VSL VSL_VUS_VUS + vus __builtin_vec_sll (vus, vui); + VSL VSL_VUS_VUI + vbs __builtin_vec_sll (vbs, vuc); + VSL VSL_VBS_VUC + vbs __builtin_vec_sll (vbs, vus); + VSL VSL_VBS_VUS + vbs __builtin_vec_sll (vbs, vui); + VSL VSL_VBS_VUI + vp __builtin_vec_sll (vp, vus); + VSL VSL_VP_VUS + vp __builtin_vec_sll (vp, vui); + VSL VSL_VP_VUI + vsi __builtin_vec_sll (vsi, vus); + VSL VSL_VSI_VUS + vsi __builtin_vec_sll (vsi, vui); + VSL VSL_VSI_VUI + vui __builtin_vec_sll (vui, vus); + VSL VSL_VUI_VUS + vui __builtin_vec_sll (vui, vui); + VSL VSL_VUI_VUI + vbi __builtin_vec_sll (vbi, vuc); + VSL VSL_VBI_VUC + vbi __builtin_vec_sll (vbi, vus); + VSL VSL_VBI_VUS + vbi __builtin_vec_sll (vbi, vui); + VSL VSL_VBI_VUI + vbll __builtin_vec_sll (vbll, vuc); + VSL VSL_VBLL_VUC + vbll __builtin_vec_sll (vbll, vus); + VSL VSL_VBLL_VUS + vbll __builtin_vec_sll (vbll, vull); + VSL VSL_VBLL_VULL +
+[VEC_SLO, vec_slo, __builtin_vec_slo] + vsc __builtin_vec_slo (vsc, vsc); + VSLO VSLO_VSCS + vsc __builtin_vec_slo (vsc, vuc); + VSLO VSLO_VSCU + vuc __builtin_vec_slo (vuc, vsc); + VSLO VSLO_VUCS + vuc __builtin_vec_slo (vuc, vuc); + VSLO VSLO_VUCU + vss __builtin_vec_slo (vss, vsc); + VSLO VSLO_VSSS + vss __builtin_vec_slo (vss, vuc); + VSLO VSLO_VSSU + vus __builtin_vec_slo (vus, vsc); + VSLO VSLO_VUSS + vus __builtin_vec_slo (vus, vuc); + VSLO VSLO_VUSU + vp __builtin_vec_slo (vp, vsc); + VSLO VSLO_VPS + vp __builtin_vec_slo (vp, vuc); + VSLO VSLO_VPU + vsi __builtin_vec_slo (vsi, vsc); + VSLO VSLO_VSIS + vsi __builtin_vec_slo (vsi, vuc); + VSLO VSLO_VSIU + vui __builtin_vec_slo (vui, vsc); + VSLO VSLO_VUIS + vui __builtin_vec_slo (vui, vuc); + VSLO VSLO_VUIU + vsll __builtin_vec_slo (vsll, vsc); + VSLO VSLO_VSLLS + vsll __builtin_vec_slo (vsll, vuc); + VSLO VSLO_VSLLU + vull __builtin_vec_slo (vull, vsc); + VSLO VSLO_VULLS + vull __builtin_vec_slo (vull, vuc); + VSLO VSLO_VULLU + vf __builtin_vec_slo (vf, vsc); + VSLO VSLO_VFS + vf __builtin_vec_slo (vf, vuc); + VSLO VSLO_VFU +
+[VEC_SLV, vec_slv, __builtin_vec_vslv, _ARCH_PWR9] + vuc __builtin_vec_vslv (vuc, vuc); + VSLV +
+[VEC_SPLAT, vec_splat, __builtin_vec_splat] + vsc __builtin_vec_splat (vsc, signed int); + VSPLTB VSPLTB_VSC + vuc __builtin_vec_splat (vuc, signed int); + VSPLTB VSPLTB_VUC + vbc __builtin_vec_splat (vbc, signed int); + VSPLTB VSPLTB_VBC + vss __builtin_vec_splat (vss, signed int); + VSPLTH VSPLTH_VSS + vus __builtin_vec_splat (vus, signed int); + VSPLTH VSPLTH_VUS + vbs __builtin_vec_splat (vbs, signed int); + VSPLTH VSPLTH_VBS + vp __builtin_vec_splat (vp, signed int); + VSPLTH VSPLTH_VP + vf __builtin_vec_splat (vf, signed int); + VSPLTW VSPLTW_VF + vsi __builtin_vec_splat (vsi, signed int); + VSPLTW VSPLTW_VSI + vui __builtin_vec_splat (vui, signed int); + VSPLTW VSPLTW_VUI + vbi __builtin_vec_splat (vbi, signed int); + VSPLTW VSPLTW_VBI + vd __builtin_vec_splat (vd, signed int); + XXSPLTD_V2DF + vsll __builtin_vec_splat (vsll, signed int); + XXSPLTD_V2DI XXSPLTD_VSLL + vull __builtin_vec_splat (vull, signed int); + XXSPLTD_V2DI XXSPLTD_VULL + vbll __builtin_vec_splat (vbll, signed int); + XXSPLTD_V2DI XXSPLTD_VBLL +
+[VEC_SPLAT_S8, vec_splat_s8, __builtin_vec_splat_s8] + vsc __builtin_vec_splat_s8 (signed int); + VSPLTISB +
+[VEC_SPLAT_S16, vec_splat_s16, __builtin_vec_splat_s16] + vss __builtin_vec_splat_s16 (signed int); + VSPLTISH +
+[VEC_SPLAT_S32, vec_splat_s32, __builtin_vec_splat_s32] + vsi __builtin_vec_splat_s32 (signed int); + VSPLTISW +
+; There are no entries for vec_splat_u{8,16,32}. These are handled +; in altivec.h with a #define and a cast. +
+[VEC_SPLATI, vec_splati, __builtin_vec_xxspltiw, _ARCH_PWR10] + vsi __builtin_vec_xxspltiw (signed int); + VXXSPLTIW_V4SI + vf __builtin_vec_xxspltiw (float); + VXXSPLTIW_V4SF +
+[VEC_SPLATID, vec_splatid, __builtin_vec_xxspltid, _ARCH_PWR10] + vd __builtin_vec_xxspltid (float); + VXXSPLTIDP +
+[VEC_SPLATI_INS, vec_splati_ins, __builtin_vec_xxsplti32dx, _ARCH_PWR10] + vsi __builtin_vec_xxsplti32dx (vsi, const int, signed int); + VXXSPLTI32DX_V4SI VXXSPLTI32DX_VSI + vui __builtin_vec_xxsplti32dx (vui, const int, unsigned int); + VXXSPLTI32DX_V4SI VXXSPLTI32DX_VUI + vf __builtin_vec_xxsplti32dx (vf, const int, float); + VXXSPLTI32DX_V4SF +
+; There are no actual builtins for vec_splats. There is special handling for +; this in altivec_resolve_overloaded_builtin in rs6000-c.c, where the call +; is replaced by a constructor. The single overload here causes +; __builtin_vec_splats to be registered with the front end so that can happen. +[VEC_SPLATS, vec_splats, __builtin_vec_splats] + vsi __builtin_vec_splats (vsi); + ABS_V4SI SPLATS_FAKERY +
+[VEC_SQRT, vec_sqrt, __builtin_vec_sqrt, __VSX__] + vf __builtin_vec_sqrt (vf); + XVSQRTSP + vd __builtin_vec_sqrt (vd); + XVSQRTDP +
+[VEC_SR, vec_sr, __builtin_vec_sr] + vsc __builtin_vec_sr (vsc, vuc); + VSRB VSRB_VSC + vuc __builtin_vec_sr (vuc, vuc); + VSRB VSRB_VUC + vss __builtin_vec_sr (vss, vus); + VSRH VSRH_VSS + vus __builtin_vec_sr (vus, vus); + VSRH VSRH_VUS + vsi __builtin_vec_sr (vsi, vui); + VSRW VSRW_VSI + vui __builtin_vec_sr (vui, vui); + VSRW VSRW_VUI + vsll __builtin_vec_sr (vsll, vull); + VSRD VSRD_VSLL + vull __builtin_vec_sr (vull, vull); + VSRD VSRD_VULL + vsq __builtin_vec_sr (vsq, vuq); + VSRQ VSRQ_VSQ + vuq __builtin_vec_sr (vuq, vuq); + VSRQ VSRQ_VUQ +
+[VEC_SRA, vec_sra, __builtin_vec_sra] + vsc __builtin_vec_sra (vsc, vuc); + VSRAB VSRAB_VSC + vuc __builtin_vec_sra (vuc, vuc); + VSRAB VSRAB_VUC + vss __builtin_vec_sra (vss, vus); + VSRAH VSRAH_VSS + vus __builtin_vec_sra (vus, vus); + VSRAH VSRAH_VUS + vsi __builtin_vec_sra (vsi, vui); + VSRAW VSRAW_VSI + vui __builtin_vec_sra (vui, vui); + VSRAW VSRAW_VUI + vsll __builtin_vec_sra (vsll, vull); + VSRAD VSRAD_VSLL + vull __builtin_vec_sra (vull, vull); + VSRAD VSRAD_VULL + vsq __builtin_vec_sra (vsq, vuq); + VSRAQ VSRAQ_VSQ + vuq __builtin_vec_sra (vuq, vuq); + VSRAQ VSRAQ_VUQ +
+[VEC_SRDB, vec_srdb, __builtin_vec_srdb, _ARCH_PWR10] + vsc __builtin_vec_srdb (vsc, vsc, const int); + VSRDB_V16QI VSRDB_VSC + vuc __builtin_vec_srdb (vuc, vuc, const int); + VSRDB_V16QI VSRDB_VUC + vss __builtin_vec_srdb (vss, vss, const int); + VSRDB_V8HI VSRDB_VSS + vus __builtin_vec_srdb (vus, vus, const int); + VSRDB_V8HI VSRDB_VUS + vsi __builtin_vec_srdb (vsi, vsi, const int); + VSRDB_V4SI VSRDB_VSI + vui __builtin_vec_srdb (vui, vui, const int); + VSRDB_V4SI VSRDB_VUI + vsll __builtin_vec_srdb (vsll, vsll, const int); + VSRDB_V2DI VSRDB_VSLL + vull __builtin_vec_srdb (vull, vull, const int); + VSRDB_V2DI VSRDB_VULL +
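Referring back to the vec_splat_u{8,16,32} and vec_splats notes above, a hedged usage sketch (editorial illustration only, not from the patch):

    #include <altivec.h>

    vector signed int splats4 (void)
    {
      /* Rewritten into a vector constructor by the resolver,
         as the vec_splats note above describes.  */
      return vec_splats (42);   /* { 42, 42, 42, 42 } */
    }

    vector unsigned char splat5 (void)
    {
      /* vec_splat_u8 is the altivec.h #define: a cast around the
         signed vec_splat_s8, so no separate overload entry exists.  */
      return vec_splat_u8 (5);
    }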
+[VEC_SRL, vec_srl, __builtin_vec_srl] + vsc __builtin_vec_srl (vsc, vuc); + VSR VSR_VSC + vuc __builtin_vec_srl (vuc, vuc); + VSR VSR_VUC + vss __builtin_vec_srl (vss, vuc); + VSR VSR_VSS + vus __builtin_vec_srl (vus, vuc); + VSR VSR_VUS + vp __builtin_vec_srl (vp, vuc); + VSR VSR_VP + vsi __builtin_vec_srl (vsi, vuc); + VSR VSR_VSI + vui __builtin_vec_srl (vui, vuc); + VSR VSR_VUI + vsll __builtin_vec_srl (vsll, vuc); + VSR VSR_VSLL + vull __builtin_vec_srl (vull, vuc); + VSR VSR_VULL +; The following variants are deprecated. + vsc __builtin_vec_srl (vsc, vus); + VSR VSR_VSC_VUS + vsc __builtin_vec_srl (vsc, vui); + VSR VSR_VSC_VUI + vuc __builtin_vec_srl (vuc, vus); + VSR VSR_VUC_VUS + vuc __builtin_vec_srl (vuc, vui); + VSR VSR_VUC_VUI + vbc __builtin_vec_srl (vbc, vuc); + VSR VSR_VBC_VUC + vbc __builtin_vec_srl (vbc, vus); + VSR VSR_VBC_VUS + vbc __builtin_vec_srl (vbc, vui); + VSR VSR_VBC_VUI + vss __builtin_vec_srl (vss, vus); + VSR VSR_VSS_VUS + vss __builtin_vec_srl (vss, vui); + VSR VSR_VSS_VUI + vus __builtin_vec_srl (vus, vus); + VSR VSR_VUS_VUS + vus __builtin_vec_srl (vus, vui); + VSR VSR_VUS_VUI + vbs __builtin_vec_srl (vbs, vuc); + VSR VSR_VBS_VUC + vbs __builtin_vec_srl (vbs, vus); + VSR VSR_VBS_VUS + vbs __builtin_vec_srl (vbs, vui); + VSR VSR_VBS_VUI + vp __builtin_vec_srl (vp, vus); + VSR VSR_VP_VUS + vp __builtin_vec_srl (vp, vui); + VSR VSR_VP_VUI + vsi __builtin_vec_srl (vsi, vus); + VSR VSR_VSI_VUS + vsi __builtin_vec_srl (vsi, vui); + VSR VSR_VSI_VUI + vui __builtin_vec_srl (vui, vus); + VSR VSR_VUI_VUS + vui __builtin_vec_srl (vui, vui); + VSR VSR_VUI_VUI + vbi __builtin_vec_srl (vbi, vuc); + VSR VSR_VBI_VUC + vbi __builtin_vec_srl (vbi, vus); + VSR VSR_VBI_VUS + vbi __builtin_vec_srl (vbi, vui); + VSR VSR_VBI_VUI +
+[VEC_SRO, vec_sro, __builtin_vec_sro] + vsc __builtin_vec_sro (vsc, vsc); + VSRO VSRO_VSCS + vsc __builtin_vec_sro (vsc, vuc); + VSRO VSRO_VSCU + vuc __builtin_vec_sro (vuc, vsc); + VSRO VSRO_VUCS + vuc __builtin_vec_sro (vuc, vuc); + VSRO VSRO_VUCU + vss __builtin_vec_sro (vss, vsc); + VSRO VSRO_VSSS + vss __builtin_vec_sro (vss, vuc); + VSRO VSRO_VSSU + vus __builtin_vec_sro (vus, vsc); + VSRO VSRO_VUSS + vus __builtin_vec_sro (vus, vuc); + VSRO VSRO_VUSU + vp __builtin_vec_sro (vp, vsc); + VSRO VSRO_VPS + vp __builtin_vec_sro (vp, vuc); + VSRO VSRO_VPU + vsi __builtin_vec_sro (vsi, vsc); + VSRO VSRO_VSIS + vsi __builtin_vec_sro (vsi, vuc); + VSRO VSRO_VSIU + vui __builtin_vec_sro (vui, vsc); + VSRO VSRO_VUIS + vui __builtin_vec_sro (vui, vuc); + VSRO VSRO_VUIU + vsll __builtin_vec_sro (vsll, vsc); + VSRO VSRO_VSLLS + vsll __builtin_vec_sro (vsll, vuc); + VSRO VSRO_VSLLU + vull __builtin_vec_sro (vull, vsc); + VSRO VSRO_VULLS + vull __builtin_vec_sro (vull, vuc); + VSRO VSRO_VULLU + vf __builtin_vec_sro (vf, vsc); + VSRO VSRO_VFS + vf __builtin_vec_sro (vf, vuc); + VSRO VSRO_VFU +
+[VEC_SRV, vec_srv, __builtin_vec_vsrv, _ARCH_PWR9] + vuc __builtin_vec_vsrv (vuc, vuc); + VSRV +
+[VEC_ST, vec_st, __builtin_vec_st] + void __builtin_vec_st (vsc, signed long long, vsc *); + STVX_V16QI STVX_VSC + void __builtin_vec_st (vsc, signed long long, signed char *); + STVX_V16QI STVX_SC + void __builtin_vec_st (vuc, signed long long, vuc *); + STVX_V16QI STVX_VUC + void __builtin_vec_st (vuc, signed long long, unsigned char *); + STVX_V16QI STVX_UC + void __builtin_vec_st (vbc, signed long long, vbc *); + STVX_V16QI STVX_VBC + void __builtin_vec_st (vbc, signed long long, signed char *); + STVX_V16QI STVX_SC_B + void __builtin_vec_st (vbc, signed long long, unsigned char *); + STVX_V16QI STVX_UC_B + void __builtin_vec_st (vss, signed long long, vss *); + STVX_V8HI STVX_VSS + void __builtin_vec_st (vss, signed long long, signed short *); + STVX_V8HI STVX_SS + void __builtin_vec_st (vus, signed long long, vus *); + STVX_V8HI STVX_VUS + void __builtin_vec_st (vus, signed long long, unsigned short *); + STVX_V8HI STVX_US + void __builtin_vec_st (vbs, signed long long, vbs *); + STVX_V8HI STVX_VBS + void __builtin_vec_st (vbs, signed long long, signed short *); + STVX_V8HI STVX_SS_B + void __builtin_vec_st (vbs, signed long long, unsigned short *); + STVX_V8HI STVX_US_B + void __builtin_vec_st (vp, signed long long, vp *); + STVX_V8HI STVX_P + void __builtin_vec_st (vsi, signed long long, vsi *); + STVX_V4SI STVX_VSI + void __builtin_vec_st (vsi, signed long long, signed int *); + STVX_V4SI STVX_SI + void __builtin_vec_st (vui, signed long long, vui *); + STVX_V4SI STVX_VUI + void __builtin_vec_st (vui, signed long long, unsigned int *); + STVX_V4SI STVX_UI + void __builtin_vec_st (vbi, signed long long, vbi *); + STVX_V4SI STVX_VBI + void __builtin_vec_st (vbi, signed long long, signed int *); + STVX_V4SI STVX_SI_B + void __builtin_vec_st (vbi, signed long long, unsigned int *); + STVX_V4SI STVX_UI_B + void __builtin_vec_st (vsll, signed long long, vsll *); + STVX_V2DI STVX_VSLL + void __builtin_vec_st (vsll, signed long long, signed long long *); + STVX_V2DI STVX_SLL + void __builtin_vec_st (vull, signed long long, vull *); + STVX_V2DI STVX_VULL + void __builtin_vec_st (vull, signed long long, unsigned long long *); + STVX_V2DI STVX_ULL + void __builtin_vec_st (vbll, signed long long, vbll *); + STVX_V2DI STVX_VBLL + void __builtin_vec_st (vf, signed long long, vf *); + STVX_V4SF STVX_VF + void __builtin_vec_st (vf, signed long long, float *); + STVX_V4SF STVX_F + void __builtin_vec_st (vd, signed long long, vd *); + STVX_V2DF STVX_VD + void __builtin_vec_st (vd, signed long long, double *); + STVX_V2DF STVX_D +; The following variants are deprecated.
+ void __builtin_vec_st (vbll, signed long long, signed long long *); + STVX_V2DI STVX_SLL_B + void __builtin_vec_st (vbll, signed long long, unsigned long long *); + STVX_V2DI STVX_ULL_B +
+[VEC_STE, vec_ste, __builtin_vec_ste] + void __builtin_vec_ste (vsc, signed long long, signed char *); + STVEBX STVEBX_S + void __builtin_vec_ste (vuc, signed long long, unsigned char *); + STVEBX STVEBX_U + void __builtin_vec_ste (vbc, signed long long, signed char *); + STVEBX STVEBX_BS + void __builtin_vec_ste (vbc, signed long long, unsigned char *); + STVEBX STVEBX_BU + void __builtin_vec_ste (vss, signed long long, signed short *); + STVEHX STVEHX_S + void __builtin_vec_ste (vus, signed long long, unsigned short *); + STVEHX STVEHX_U + void __builtin_vec_ste (vbs, signed long long, signed short *); + STVEHX STVEHX_BS + void __builtin_vec_ste (vbs, signed long long, unsigned short *); + STVEHX STVEHX_BU + void __builtin_vec_ste (vp, signed long long, signed short *); + STVEHX STVEHX_PS + void __builtin_vec_ste (vp, signed long long, unsigned short *); + STVEHX STVEHX_PU + void __builtin_vec_ste (vsi, signed long long, signed int *); + STVEWX STVEWX_S + void __builtin_vec_ste (vui, signed long long, unsigned int *); + STVEWX STVEWX_U + void __builtin_vec_ste (vbi, signed long long, signed int *); + STVEWX STVEWX_BS + void __builtin_vec_ste (vbi, signed long long, unsigned int *); + STVEWX STVEWX_BU + void __builtin_vec_ste (vf, signed long long, float *); + STVEWX STVEWX_F +
+; There are no builtins for VEC_STEP; this is handled directly +; with a constant replacement in rs6000_resolve_overloaded_builtin. +; The single overload registers __builtin_vec_step with the front end +; so this can happen. +[VEC_STEP, vec_step, __builtin_vec_step] + signed int __builtin_vec_step (vsi); + VCLZLSBB_V4SI STEP_FAKERY +
+[VEC_STL, vec_stl, __builtin_vec_stl] + void __builtin_vec_stl (vsc, signed long long, vsc *); + STVXL_V16QI STVXL_VSC + void __builtin_vec_stl (vsc, signed long long, signed char *); + STVXL_V16QI STVXL_SC + void __builtin_vec_stl (vuc, signed long long, vuc *); + STVXL_V16QI STVXL_VUC + void __builtin_vec_stl (vuc, signed long long, unsigned char *); + STVXL_V16QI STVXL_UC + void __builtin_vec_stl (vbc, signed long long, vbc *); + STVXL_V16QI STVXL_VBC + void __builtin_vec_stl (vbc, signed long long, signed char *); + STVXL_V16QI STVXL_SC_B + void __builtin_vec_stl (vbc, signed long long, unsigned char *); + STVXL_V16QI STVXL_UC_B + void __builtin_vec_stl (vss, signed long long, vss *); + STVXL_V8HI STVXL_VSS + void __builtin_vec_stl (vss, signed long long, signed short *); + STVXL_V8HI STVXL_SS + void __builtin_vec_stl (vus, signed long long, vus *); + STVXL_V8HI STVXL_VUS + void __builtin_vec_stl (vus, signed long long, unsigned short *); + STVXL_V8HI STVXL_US + void __builtin_vec_stl (vbs, signed long long, vbs *); + STVXL_V8HI STVXL_VBS + void __builtin_vec_stl (vbs, signed long long, signed short *); + STVXL_V8HI STVXL_SS_B + void __builtin_vec_stl (vbs, signed long long, unsigned short *); + STVXL_V8HI STVXL_US_B + void __builtin_vec_stl (vp, signed long long, vp *); + STVXL_V8HI STVXL_P + void __builtin_vec_stl (vsi, signed long long, vsi *); + STVXL_V4SI STVXL_VSI + void __builtin_vec_stl (vsi, signed long long, signed int *); + STVXL_V4SI STVXL_SI + void __builtin_vec_stl (vui, signed long long, vui *); + STVXL_V4SI STVXL_VUI + void __builtin_vec_stl (vui, signed long long, unsigned int *); + STVXL_V4SI STVXL_UI + void __builtin_vec_stl (vbi, signed long long, vbi *); + STVXL_V4SI STVXL_VBI + void __builtin_vec_stl (vbi, signed long long, signed int *); + STVXL_V4SI STVXL_SI_B + void __builtin_vec_stl (vbi, signed long long, unsigned int *); + STVXL_V4SI STVXL_UI_B + void __builtin_vec_stl (vsll, signed long long, vsll *); + STVXL_V2DI STVXL_VSLL + void __builtin_vec_stl (vsll, signed long long, signed long long *); + STVXL_V2DI STVXL_SLL + void __builtin_vec_stl (vull, signed long long, vull *); + STVXL_V2DI STVXL_VULL + void __builtin_vec_stl (vull, signed long long, unsigned long long *); + STVXL_V2DI STVXL_ULL + void __builtin_vec_stl (vbll, signed long long, vbll *); + STVXL_V2DI STVXL_VBLL + void __builtin_vec_stl (vbll, signed long long, signed long long *); + STVXL_V2DI STVXL_SLL_B + void __builtin_vec_stl (vbll, signed long long, unsigned long long *); + STVXL_V2DI STVXL_ULL_B + void __builtin_vec_stl (vf, signed long long, vf *); + STVXL_V4SF STVXL_VF + void __builtin_vec_stl (vf, signed long long, float *); + STVXL_V4SF STVXL_F + void __builtin_vec_stl (vd, signed long long, vd *); + STVXL_V2DF STVXL_VD + void __builtin_vec_stl (vd, signed long long, double *); + STVXL_V2DF STVXL_D +
+[VEC_STRIL, vec_stril, __builtin_vec_stril, _ARCH_PWR10] + vuc __builtin_vec_stril (vuc); + VSTRIBL VSTRIBL_U + vsc __builtin_vec_stril (vsc); + VSTRIBL VSTRIBL_S + vus __builtin_vec_stril (vus); + VSTRIHL VSTRIHL_U + vss __builtin_vec_stril (vss); + VSTRIHL VSTRIHL_S +
+[VEC_STRIL_P, vec_stril_p, __builtin_vec_stril_p, _ARCH_PWR10] + signed int __builtin_vec_stril_p (vuc); + VSTRIBL_P VSTRIBL_PU + signed int __builtin_vec_stril_p (vsc); + VSTRIBL_P VSTRIBL_PS + signed int __builtin_vec_stril_p (vus); + VSTRIHL_P VSTRIHL_PU + signed int __builtin_vec_stril_p (vss); + VSTRIHL_P VSTRIHL_PS +
+[VEC_STRIR, vec_strir, __builtin_vec_strir, _ARCH_PWR10] + vuc __builtin_vec_strir (vuc); + VSTRIBR VSTRIBR_U + vsc __builtin_vec_strir (vsc); + VSTRIBR VSTRIBR_S + vus __builtin_vec_strir (vus); + VSTRIHR VSTRIHR_U + vss __builtin_vec_strir (vss); + VSTRIHR VSTRIHR_S +
+[VEC_STRIR_P, vec_strir_p, __builtin_vec_strir_p, _ARCH_PWR10] + signed int __builtin_vec_strir_p (vuc); + VSTRIBR_P VSTRIBR_PU + signed int __builtin_vec_strir_p (vsc); + VSTRIBR_P VSTRIBR_PS + signed int __builtin_vec_strir_p (vus); + VSTRIHR_P VSTRIHR_PU + signed int __builtin_vec_strir_p (vss); + VSTRIHR_P VSTRIHR_PS +
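Looking back at the VEC_STEP note above: because the resolver folds the call to a constant, vec_step costs nothing at run time. A hedged sketch (editorial illustration only, not from the patch):

    #include <altivec.h>

    int step_v4si (vector signed int v)
    {
      /* Folded to the element count of the vector type (here 4);
         no builtin call survives into the generated code.  */
      return vec_step (v);
    }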
+[VEC_STVLX, vec_stvlx, __builtin_vec_stvlx, __PPU__] + void __builtin_vec_stvlx (vbc, signed long long, vbc *); + STVLX STVLX_VBC + void __builtin_vec_stvlx (vsc, signed long long, vsc *); + STVLX STVLX_VSC + void __builtin_vec_stvlx (vsc, signed long long, signed char *); + STVLX STVLX_SC + void __builtin_vec_stvlx (vuc, signed long long, vuc *); + STVLX STVLX_VUC + void __builtin_vec_stvlx (vuc, signed long long, unsigned char *); + STVLX STVLX_UC + void __builtin_vec_stvlx (vbs, signed long long, vbs *); + STVLX STVLX_VBS + void __builtin_vec_stvlx (vss, signed long long, vss *); + STVLX STVLX_VSS + void __builtin_vec_stvlx (vss, signed long long, signed short *); + STVLX STVLX_SS + void __builtin_vec_stvlx (vus, signed long long, vus *); + STVLX STVLX_VUS + void __builtin_vec_stvlx (vus, signed long long, unsigned short *); + STVLX STVLX_US + void __builtin_vec_stvlx (vp, signed long long, vp *); + STVLX STVLX_VP + void __builtin_vec_stvlx (vbi, signed long long, vbi *); + STVLX STVLX_VBI + void __builtin_vec_stvlx (vsi, signed long long, vsi *); + STVLX STVLX_VSI + void __builtin_vec_stvlx (vsi, signed long long, signed int *); + STVLX STVLX_SI + void __builtin_vec_stvlx (vui, signed long long, vui *); + STVLX STVLX_VUI + void __builtin_vec_stvlx (vui, signed long long, unsigned int *); + STVLX STVLX_UI + void __builtin_vec_stvlx (vf, signed long long, vf *); + STVLX STVLX_VF + void __builtin_vec_stvlx (vf, signed long long, float *); + STVLX STVLX_F +
+[VEC_STVLXL, vec_stvlxl, __builtin_vec_stvlxl, __PPU__] + void __builtin_vec_stvlxl (vbc, signed long long, vbc *); + STVLXL STVLXL_VBC + void __builtin_vec_stvlxl (vsc, signed long long, vsc *); + STVLXL STVLXL_VSC + void __builtin_vec_stvlxl (vsc, signed long long, signed char *); + STVLXL STVLXL_SC + void __builtin_vec_stvlxl (vuc, signed long long, vuc *); + STVLXL STVLXL_VUC + void __builtin_vec_stvlxl (vuc, signed long long, unsigned char *); + STVLXL STVLXL_UC + void __builtin_vec_stvlxl (vbs, signed long long, vbs *); + STVLXL STVLXL_VBS + void __builtin_vec_stvlxl (vss, signed long long, vss *); + STVLXL STVLXL_VSS + void __builtin_vec_stvlxl (vss, signed long long, signed short *); + STVLXL STVLXL_SS + void __builtin_vec_stvlxl (vus, signed long long, vus *); + STVLXL STVLXL_VUS + void __builtin_vec_stvlxl (vus, signed long long, unsigned short *); + STVLXL STVLXL_US + void __builtin_vec_stvlxl (vp, signed long long, vp *); + STVLXL STVLXL_VP + void __builtin_vec_stvlxl (vbi, signed long long, vbi *); + STVLXL STVLXL_VBI + void __builtin_vec_stvlxl (vsi, signed long long, vsi *); + STVLXL STVLXL_VSI + void __builtin_vec_stvlxl (vsi, signed long long, signed int *); + STVLXL STVLXL_SI + void __builtin_vec_stvlxl (vui, signed long long, vui *); + STVLXL STVLXL_VUI + void __builtin_vec_stvlxl (vui, signed long long, unsigned int *); + STVLXL STVLXL_UI + void __builtin_vec_stvlxl (vf, signed long long, vf *); + STVLXL STVLXL_VF + void __builtin_vec_stvlxl (vf, signed long long, float *); + STVLXL STVLXL_F +
+[VEC_STVRX, vec_stvrx, __builtin_vec_stvrx, __PPU__] + void __builtin_vec_stvrx (vbc, signed long long, vbc *); + STVRX STVRX_VBC + void __builtin_vec_stvrx (vsc, signed long long, vsc *); + STVRX STVRX_VSC + void __builtin_vec_stvrx (vsc, signed long long, signed char *); + STVRX STVRX_SC + void __builtin_vec_stvrx (vuc, signed long long, vuc *); + STVRX STVRX_VUC + void __builtin_vec_stvrx (vuc, signed long long, unsigned char *); + STVRX STVRX_UC + void __builtin_vec_stvrx (vbs, signed long long, vbs *); + STVRX STVRX_VBS + void __builtin_vec_stvrx (vss, signed long long, vss *); + STVRX STVRX_VSS + void __builtin_vec_stvrx (vss, signed long long, signed short *); + STVRX STVRX_SS + void __builtin_vec_stvrx (vus, signed long long, vus *); + STVRX STVRX_VUS + void __builtin_vec_stvrx (vus, signed long long, unsigned short *); + STVRX STVRX_US + void __builtin_vec_stvrx (vp, signed long long, vp *); + STVRX STVRX_VP + void __builtin_vec_stvrx (vbi, signed long long, vbi *); + STVRX STVRX_VBI + void __builtin_vec_stvrx (vsi, signed long long, vsi *); + STVRX STVRX_VSI + void __builtin_vec_stvrx (vsi, signed long long, signed int *); + STVRX STVRX_SI + void __builtin_vec_stvrx (vui, signed long long, vui *); + STVRX STVRX_VUI + void __builtin_vec_stvrx (vui, signed long long, unsigned int *); + STVRX STVRX_UI + void __builtin_vec_stvrx (vf, signed long long, vf *); + STVRX STVRX_VF + void __builtin_vec_stvrx (vf, signed long long, float *); + STVRX STVRX_F +
+[VEC_STVRXL, vec_stvrxl, __builtin_vec_stvrxl, __PPU__] + void __builtin_vec_stvrxl (vbc, signed long long, vbc *); + STVRXL STVRXL_VBC + void __builtin_vec_stvrxl (vsc, signed long long, vsc *); + STVRXL STVRXL_VSC + void __builtin_vec_stvrxl (vsc, signed long long, signed char *); + STVRXL STVRXL_SC + void __builtin_vec_stvrxl (vuc, signed long long, vuc *); + STVRXL STVRXL_VUC + void __builtin_vec_stvrxl (vuc, signed long long, unsigned char *); + STVRXL STVRXL_UC + void __builtin_vec_stvrxl (vbs, signed long long, vbs *); + STVRXL STVRXL_VBS + void __builtin_vec_stvrxl (vss, signed long long, vss *); + STVRXL STVRXL_VSS + void __builtin_vec_stvrxl (vss, signed long long, signed short *); + STVRXL STVRXL_SS + void __builtin_vec_stvrxl (vus, signed long long, vus *); + STVRXL STVRXL_VUS + void __builtin_vec_stvrxl (vus, signed long long, unsigned short *); + STVRXL STVRXL_US + void __builtin_vec_stvrxl (vp, signed long long, vp *); + STVRXL STVRXL_VP + void __builtin_vec_stvrxl (vbi, signed long long, vbi *); + STVRXL STVRXL_VBI + void __builtin_vec_stvrxl (vsi, signed long long, vsi *); + STVRXL STVRXL_VSI + void __builtin_vec_stvrxl (vsi, signed long long, signed int *); + STVRXL STVRXL_SI + void __builtin_vec_stvrxl (vui, signed long long, vui *); + STVRXL STVRXL_VUI + void __builtin_vec_stvrxl (vui, signed long long, unsigned int *); + STVRXL STVRXL_UI + void __builtin_vec_stvrxl (vf, signed long long, vf *); + STVRXL STVRXL_VF + void __builtin_vec_stvrxl (vf, signed long long, float *); + STVRXL STVRXL_F +
+[VEC_STXVL, vec_xst_len, __builtin_vec_stxvl, _ARCH_PPC64_PWR9] + void __builtin_vec_stxvl (vsc, signed char *, unsigned int); + STXVL STXVL_VSC + void __builtin_vec_stxvl (vuc, unsigned char *, unsigned int); + STXVL STXVL_VUC + void __builtin_vec_stxvl (vss, signed short *, unsigned int); + STXVL STXVL_VSS + void __builtin_vec_stxvl (vus, unsigned short *, unsigned int); + STXVL STXVL_VUS + void __builtin_vec_stxvl (vsi, signed int *, unsigned int); + STXVL STXVL_VSI + void __builtin_vec_stxvl (vui, unsigned int *, unsigned int); + STXVL STXVL_VUI + void __builtin_vec_stxvl (vsll, signed long long *, unsigned int); + STXVL STXVL_VSLL + void __builtin_vec_stxvl (vull, unsigned long long *, unsigned int); + STXVL STXVL_VULL + void __builtin_vec_stxvl (vsq, signed __int128 *, unsigned int); + STXVL STXVL_VSQ + void __builtin_vec_stxvl (vuq, unsigned __int128 *, unsigned int); + STXVL STXVL_VUQ + void __builtin_vec_stxvl (vf, float *, unsigned int); + STXVL STXVL_VF + void __builtin_vec_stxvl (vd, double *, unsigned int); + STXVL STXVL_VD +
+[VEC_SUB, vec_sub, __builtin_vec_sub] + vsc __builtin_vec_sub (vsc, vsc); + VSUBUBM VSUBUBM_VSC + vuc __builtin_vec_sub (vuc, vuc); + VSUBUBM VSUBUBM_VUC + vss __builtin_vec_sub (vss, vss); + VSUBUHM VSUBUHM_VSS + vus __builtin_vec_sub (vus, vus); + VSUBUHM VSUBUHM_VUS + vsi __builtin_vec_sub (vsi, vsi); + VSUBUWM VSUBUWM_VSI + vui __builtin_vec_sub (vui, vui); + VSUBUWM VSUBUWM_VUI + vsll __builtin_vec_sub (vsll, vsll); + VSUBUDM VSUBUDM_VSLL + vull __builtin_vec_sub (vull, vull); + VSUBUDM VSUBUDM_VULL + vsq __builtin_vec_sub (vsq, vsq); + VSUBUQM VSUBUQM_VSQ + vuq __builtin_vec_sub (vuq, vuq); + VSUBUQM VSUBUQM_VUQ + vf __builtin_vec_sub (vf, vf); + VSUBFP + vd __builtin_vec_sub (vd, vd); + XVSUBDP +; The following variants are deprecated.
+ vsc __builtin_vec_sub (vsc, vbc); + VSUBUBM VSUBUBM_VSC_VBC + vsc __builtin_vec_sub (vbc, vsc); + VSUBUBM VSUBUBM_VBC_VSC + vuc __builtin_vec_sub (vuc, vbc); + VSUBUBM VSUBUBM_VUC_VBC + vuc __builtin_vec_sub (vbc, vuc); + VSUBUBM VSUBUBM_VBC_VUC + vss __builtin_vec_sub (vss, vbs); + VSUBUHM VSUBUHM_VSS_VBS + vss __builtin_vec_sub (vbs, vss); + VSUBUHM VSUBUHM_VBS_VSS + vus __builtin_vec_sub (vus, vbs); + VSUBUHM VSUBUHM_VUS_VBS + vus __builtin_vec_sub (vbs, vus); + VSUBUHM VSUBUHM_VBS_VUS + vsi __builtin_vec_sub (vsi, vbi); + VSUBUWM VSUBUWM_VSI_VBI + vsi __builtin_vec_sub (vbi, vsi); + VSUBUWM VSUBUWM_VBI_VSI + vui __builtin_vec_sub (vui, vbi); + VSUBUWM VSUBUWM_VUI_VBI + vui __builtin_vec_sub (vbi, vui); + VSUBUWM VSUBUWM_VBI_VUI + vsll __builtin_vec_sub (vsll, vbll); + VSUBUDM VSUBUDM_VSLL_VBLL + vsll __builtin_vec_sub (vbll, vsll); + VSUBUDM VSUBUDM_VBLL_VSLL + vull __builtin_vec_sub (vull, vbll); + VSUBUDM VSUBUDM_VULL_VBLL + vull __builtin_vec_sub (vbll, vull); + VSUBUDM VSUBUDM_VBLL_VULL + +[VEC_SUBC, vec_subc, __builtin_vec_subc] + vsi __builtin_vec_subc (vsi, vsi); + VSUBCUW VSUBCUW_VSI + vui __builtin_vec_subc (vui, vui); + VSUBCUW VSUBCUW_VUI + vsq __builtin_vec_subc (vsq, vsq); + VSUBCUQ VSUBCUQ_VSQ + vuq __builtin_vec_subc (vuq, vuq); + VSUBCUQ VSUBCUQ_VUQ + +; TODO: Note that the entry for VEC_SUBE currently gets ignored in +; altivec_resolve_overloaded_builtin. Revisit whether we can remove +; that. We still need to register the legal builtin forms here. +[VEC_SUBE, vec_sube, __builtin_vec_sube] + vsq __builtin_vec_sube (vsq, vsq, vsq); + VSUBEUQM VSUBEUQM_VSQ + vuq __builtin_vec_sube (vuq, vuq, vuq); + VSUBEUQM VSUBEUQM_VUQ + +; TODO: Note that the entry for VEC_SUBEC currently gets ignored in +; altivec_resolve_overloaded_builtin. Revisit whether we can remove +; that. We still need to register the legal builtin forms here. +[VEC_SUBEC, vec_subec, __builtin_vec_subec] + vsq __builtin_vec_subec (vsq, vsq, vsq); + VSUBECUQ VSUBECUQ_VSQ + vuq __builtin_vec_subec (vuq, vuq, vuq); + VSUBECUQ VSUBECUQ_VUQ + +[VEC_SUBS, vec_subs, __builtin_vec_subs] + vuc __builtin_vec_subs (vuc, vuc); + VSUBUBS + vsc __builtin_vec_subs (vsc, vsc); + VSUBSBS + vus __builtin_vec_subs (vus, vus); + VSUBUHS + vss __builtin_vec_subs (vss, vss); + VSUBSHS + vui __builtin_vec_subs (vui, vui); + VSUBUWS + vsi __builtin_vec_subs (vsi, vsi); + VSUBSWS +; The following variants are deprecated. 
+ vuc __builtin_vec_subs (vuc, vbc); + VSUBUBS VSUBUBS_UB + vuc __builtin_vec_subs (vbc, vuc); + VSUBUBS VSUBUBS_BU + vsc __builtin_vec_subs (vsc, vbc); + VSUBSBS VSUBSBS_SB + vsc __builtin_vec_subs (vbc, vsc); + VSUBSBS VSUBSBS_BS + vus __builtin_vec_subs (vus, vbs); + VSUBUHS VSUBUHS_UB + vus __builtin_vec_subs (vbs, vus); + VSUBUHS VSUBUHS_BU + vss __builtin_vec_subs (vss, vbs); + VSUBSHS VSUBSHS_SB + vss __builtin_vec_subs (vbs, vss); + VSUBSHS VSUBSHS_BS + vui __builtin_vec_subs (vui, vbi); + VSUBUWS VSUBUWS_UB + vui __builtin_vec_subs (vbi, vui); + VSUBUWS VSUBUWS_BU + vsi __builtin_vec_subs (vsi, vbi); + VSUBSWS VSUBSWS_SB + vsi __builtin_vec_subs (vbi, vsi); + VSUBSWS VSUBSWS_BS + +[VEC_SUM2S, vec_sum2s, __builtin_vec_sum2s] + vsi __builtin_vec_sum2s (vsi, vsi); + VSUM2SWS + +[VEC_SUM4S, vec_sum4s, __builtin_vec_sum4s] + vui __builtin_vec_sum4s (vuc, vui); + VSUM4UBS + vsi __builtin_vec_sum4s (vsc, vsi); + VSUM4SBS + vsi __builtin_vec_sum4s (vss, vsi); + VSUM4SHS + +[VEC_SUMS, vec_sums, __builtin_vec_sums] + vsi __builtin_vec_sums (vsi, vsi); + VSUMSWS + +[VEC_TERNARYLOGIC, vec_ternarylogic, __builtin_vec_xxeval, _ARCH_PWR10] + vuc __builtin_vec_xxeval (vuc, vuc, vuc, const int); + XXEVAL XXEVAL_VUC + vus __builtin_vec_xxeval (vus, vus, vus, const int); + XXEVAL XXEVAL_VUS + vui __builtin_vec_xxeval (vui, vui, vui, const int); + XXEVAL XXEVAL_VUI + vull __builtin_vec_xxeval (vull, vull, vull, const int); + XXEVAL XXEVAL_VULL + vuq __builtin_vec_xxeval (vuq, vuq, vuq, const int); + XXEVAL XXEVAL_VUQ + +[VEC_TEST_LSBB_ALL_ONES, vec_test_lsbb_all_ones, __builtin_vec_xvtlsbb_all_ones, _ARCH_PWR9] + signed int __builtin_vec_xvtlsbb_all_ones (vuc); + XVTLSBB_ONES + +[VEC_TEST_LSBB_ALL_ZEROS, vec_test_lsbb_all_zeros, __builtin_vec_xvtlsbb_all_zeros, _ARCH_PWR9] + signed int __builtin_vec_xvtlsbb_all_zeros (vuc); + XVTLSBB_ZEROS + +[VEC_TRUNC, vec_trunc, __builtin_vec_trunc] + vf __builtin_vec_trunc (vf); + VRFIZ + vd __builtin_vec_trunc (vd); + XVRDPIZ + +[VEC_TSTSFI_GT, SKIP, __builtin_dfp_dtstsfi_gt] + signed int __builtin_dfp_dtstsfi_gt (const int, _Decimal64); + TSTSFI_GT_DD + signed int __builtin_dfp_dtstsfi_gt (const int, _Decimal128); + TSTSFI_GT_TD + +[VEC_TSTSFI_EQ, SKIP, __builtin_dfp_dtstsfi_eq] + signed int __builtin_dfp_dtstsfi_eq (const int, _Decimal64); + TSTSFI_EQ_DD + signed int __builtin_dfp_dtstsfi_eq (const int, _Decimal128); + TSTSFI_EQ_TD + +[VEC_TSTSFI_LT, SKIP, __builtin_dfp_dtstsfi_lt] + signed int __builtin_dfp_dtstsfi_lt (const int, _Decimal64); + TSTSFI_LT_DD + signed int __builtin_dfp_dtstsfi_lt (const int, _Decimal128); + TSTSFI_LT_TD + +[VEC_TSTSFI_OV, SKIP, __builtin_dfp_dtstsfi_ov] + signed int __builtin_dfp_dtstsfi_ov (const int, _Decimal64); + TSTSFI_OV_DD + signed int __builtin_dfp_dtstsfi_ov (const int, _Decimal128); + TSTSFI_OV_TD + +[VEC_UNPACKH, vec_unpackh, __builtin_vec_unpackh] + vss __builtin_vec_unpackh (vsc); + VUPKHSB VUPKHSB_VSC + vbs __builtin_vec_unpackh (vbc); + VUPKHSB VUPKHSB_VBC + vsi __builtin_vec_unpackh (vss); + VUPKHSH VUPKHSH_VSS + vbi __builtin_vec_unpackh (vbs); + VUPKHSH VUPKHSH_VBS + vui __builtin_vec_unpackh (vp); + VUPKHPX + vsll __builtin_vec_unpackh (vsi); + VUPKHSW VUPKHSW_VSI + vbll __builtin_vec_unpackh (vbi); + VUPKHSW VUPKHSW_VBI + vd __builtin_vec_unpackh (vf); + DOUBLEH_V4SF VUPKHF + +[VEC_UNPACKL, vec_unpackl, __builtin_vec_unpackl] + vss __builtin_vec_unpackl (vsc); + VUPKLSB VUPKLSB_VSC + vbs __builtin_vec_unpackl (vbc); + VUPKLSB VUPKLSB_VBC + vsi __builtin_vec_unpackl (vss); + VUPKLSH VUPKLSH_VSS + vbi 
__builtin_vec_unpackl (vbs); + VUPKLSH VUPKLSH_VBS + vui __builtin_vec_unpackl (vp); + VUPKLPX + vsll __builtin_vec_unpackl (vsi); + VUPKLSW VUPKLSW_VSI + vbll __builtin_vec_unpackl (vbi); + VUPKLSW VUPKLSW_VBI + vd __builtin_vec_unpackl (vf); + DOUBLEL_V4SF VUPKLF + +[VEC_UNSIGNED, vec_unsigned, __builtin_vec_vunsigned] + vui __builtin_vec_vunsigned (vf); + VEC_VUNSIGNED_V4SF + vull __builtin_vec_vunsigned (vd); + VEC_VUNSIGNED_V2DF + +[VEC_UNSIGNED2, vec_unsigned2, __builtin_vec_vunsigned2] + vui __builtin_vec_vunsigned2 (vd, vd); + VEC_VUNSIGNED2_V2DF + +[VEC_UNSIGNEDE, vec_unsignede, __builtin_vec_vunsignede] + vui __builtin_vec_vunsignede (vd); + VEC_VUNSIGNEDE_V2DF + +[VEC_UNSIGNEDO, vec_unsignedo, __builtin_vec_vunsignedo] + vui __builtin_vec_vunsignedo (vd); + VEC_VUNSIGNEDO_V2DF + +[VEC_VEE, vec_extract_exp, __builtin_vec_extract_exp, _ARCH_PWR9] + vui __builtin_vec_extract_exp (vf); + VEESP + vull __builtin_vec_extract_exp (vd); + VEEDP + +[VEC_VES, vec_extract_sig, __builtin_vec_extract_sig, _ARCH_PWR9] + vui __builtin_vec_extract_sig (vf); + VESSP + vull __builtin_vec_extract_sig (vd); + VESDP + +[VEC_VIE, vec_insert_exp, __builtin_vec_insert_exp, _ARCH_PWR9] + vf __builtin_vec_insert_exp (vf, vui); + VIESP VIESP_VF + vf __builtin_vec_insert_exp (vui, vui); + VIESP VIESP_VUI + vd __builtin_vec_insert_exp (vd, vull); + VIEDP VIEDP_VD + vd __builtin_vec_insert_exp (vull, vull); + VIEDP VIEDP_VULL + +; It is truly unfortunate that vec_vprtyb has an incompatible set of +; interfaces with vec_parity_lsbb. So we can't even deprecate this. +[VEC_VPRTYB, vec_vprtyb, __builtin_vec_vprtyb, _ARCH_PWR9] + vsi __builtin_vec_vprtyb (vsi); + VPRTYBW VPRTYB_VSI + vui __builtin_vec_vprtyb (vui); + VPRTYBW VPRTYB_VUI + vsll __builtin_vec_vprtyb (vsll); + VPRTYBD VPRTYB_VSLL + vull __builtin_vec_vprtyb (vull); + VPRTYBD VPRTYB_VULL + vsq __builtin_vec_vprtyb (vsq); + VPRTYBQ VPRTYB_VSQ + vuq __builtin_vec_vprtyb (vuq); + VPRTYBQ VPRTYB_VUQ + signed __int128 __builtin_vec_vprtyb (signed __int128); + VPRTYBQ VPRTYB_SQ + unsigned __int128 __builtin_vec_vprtyb (unsigned __int128); + VPRTYBQ VPRTYB_UQ + +[VEC_VSCEEQ, scalar_cmp_exp_eq, __builtin_vec_scalar_cmp_exp_eq, _ARCH_PWR9] + signed int __builtin_vec_scalar_cmp_exp_eq (double, double); + VSCEDPEQ + signed int __builtin_vec_scalar_cmp_exp_eq (_Float128, _Float128); + VSCEQPEQ + +[VEC_VSCEGT, scalar_cmp_exp_gt, __builtin_vec_scalar_cmp_exp_gt, _ARCH_PWR9] + signed int __builtin_vec_scalar_cmp_exp_gt (double, double); + VSCEDPGT + signed int __builtin_vec_scalar_cmp_exp_gt (_Float128, _Float128); + VSCEQPGT + +[VEC_VSCELT, scalar_cmp_exp_lt, __builtin_vec_scalar_cmp_exp_lt, _ARCH_PWR9] + signed int __builtin_vec_scalar_cmp_exp_lt (double, double); + VSCEDPLT + signed int __builtin_vec_scalar_cmp_exp_lt (_Float128, _Float128); + VSCEQPLT + +[VEC_VSCEUO, scalar_cmp_exp_unordered, __builtin_vec_scalar_cmp_exp_unordered, _ARCH_PWR9] + signed int __builtin_vec_scalar_cmp_exp_unordered (double, double); + VSCEDPUO + signed int __builtin_vec_scalar_cmp_exp_unordered (_Float128, _Float128); + VSCEQPUO + +[VEC_VSEE, scalar_extract_exp, __builtin_vec_scalar_extract_exp, _ARCH_PWR9] + unsigned int __builtin_vec_scalar_extract_exp (double); + VSEEDP + unsigned int __builtin_vec_scalar_extract_exp (_Float128); + VSEEQP + +[VEC_VSES, scalar_extract_sig, __builtin_vec_scalar_extract_sig, _ARCH_PWR9] + unsigned long long __builtin_vec_scalar_extract_sig (double); + VSESDP + unsigned __int128 __builtin_vec_scalar_extract_sig (_Float128); + VSESQP + +[VEC_VSIE, 
scalar_insert_exp, __builtin_vec_scalar_insert_exp, _ARCH_PWR9] + double __builtin_vec_scalar_insert_exp (unsigned long long, unsigned long long); + VSIEDP + double __builtin_vec_scalar_insert_exp (double, unsigned long long); + VSIEDPF + _Float128 __builtin_vec_scalar_insert_exp (unsigned __int128, unsigned long long); + VSIEQP + _Float128 __builtin_vec_scalar_insert_exp (_Float128, unsigned long long); + VSIEQPF + +[VEC_VSTDC, scalar_test_data_class, __builtin_vec_scalar_test_data_class, _ARCH_PWR9] + unsigned int __builtin_vec_scalar_test_data_class (float, const int); + VSTDCSP + unsigned int __builtin_vec_scalar_test_data_class (double, const int); + VSTDCDP + unsigned int __builtin_vec_scalar_test_data_class (_Float128, const int); + VSTDCQP + +[VEC_VSTDCN, scalar_test_neg, __builtin_vec_scalar_test_neg, _ARCH_PWR9] + unsigned int __builtin_vec_scalar_test_neg (float); + VSTDCNSP + unsigned int __builtin_vec_scalar_test_neg (double); + VSTDCNDP + unsigned int __builtin_vec_scalar_test_neg (_Float128); + VSTDCNQP + +[VEC_VTDC, vec_test_data_class, __builtin_vec_test_data_class, _ARCH_PWR9] + vbi __builtin_vec_test_data_class (vf, const int); + VTDCSP + vbll __builtin_vec_test_data_class (vd, const int); + VTDCDP + +[VEC_XL, vec_xl, __builtin_vec_vsx_ld, __VSX__] + vsc __builtin_vec_vsx_ld (signed long long, const vsc *); + LXVW4X_V16QI LXVW4X_VSC + vsc __builtin_vec_vsx_ld (signed long long, const signed char *); + LXVW4X_V16QI LXVW4X_SC + vuc __builtin_vec_vsx_ld (signed long long, const vuc *); + LXVW4X_V16QI LXVW4X_VUC + vuc __builtin_vec_vsx_ld (signed long long, const unsigned char *); + LXVW4X_V16QI LXVW4X_UC + vbc __builtin_vec_vsx_ld (signed long long, const vbc *); + LXVW4X_V16QI LXVW4X_VBC + vss __builtin_vec_vsx_ld (signed long long, const vss *); + LXVW4X_V8HI LXVW4X_VSS + vss __builtin_vec_vsx_ld (signed long long, const signed short *); + LXVW4X_V8HI LXVW4X_SS + vus __builtin_vec_vsx_ld (signed long long, const vus *); + LXVW4X_V8HI LXVW4X_VUS + vus __builtin_vec_vsx_ld (signed long long, const unsigned short *); + LXVW4X_V8HI LXVW4X_US + vbs __builtin_vec_vsx_ld (signed long long, const vbs *); + LXVW4X_V8HI LXVW4X_VBS + vp __builtin_vec_vsx_ld (signed long long, const vp *); + LXVW4X_V8HI LXVW4X_P + vsi __builtin_vec_vsx_ld (signed long long, const vsi *); + LXVW4X_V4SI LXVW4X_VSI + vsi __builtin_vec_vsx_ld (signed long long, const signed int *); + LXVW4X_V4SI LXVW4X_SI + vui __builtin_vec_vsx_ld (signed long long, const vui *); + LXVW4X_V4SI LXVW4X_VUI + vui __builtin_vec_vsx_ld (signed long long, const unsigned int *); + LXVW4X_V4SI LXVW4X_UI + vbi __builtin_vec_vsx_ld (signed long long, const vbi *); + LXVW4X_V4SI LXVW4X_VBI + vsll __builtin_vec_vsx_ld (signed long long, const vsll *); + LXVD2X_V2DI LXVD2X_VSLL + vsll __builtin_vec_vsx_ld (signed long long, const signed long long *); + LXVD2X_V2DI LXVD2X_SLL + vull __builtin_vec_vsx_ld (signed long long, const vull *); + LXVD2X_V2DI LXVD2X_VULL + vull __builtin_vec_vsx_ld (signed long long, const unsigned long long *); + LXVD2X_V2DI LXVD2X_ULL + vbll __builtin_vec_vsx_ld (signed long long, const vbll *); + LXVD2X_V2DI LXVD2X_VBLL + vsq __builtin_vec_vsx_ld (signed long long, const vsq *); + LXVD2X_V1TI LXVD2X_VSQ + vsq __builtin_vec_vsx_ld (signed long long, const signed __int128 *); + LXVD2X_V1TI LXVD2X_SQ + vuq __builtin_vec_vsx_ld (signed long long, const unsigned __int128 *); + LXVD2X_V1TI LXVD2X_UQ + vf __builtin_vec_vsx_ld (signed long long, const vf *); + LXVW4X_V4SF LXVW4X_VF + vf __builtin_vec_vsx_ld 
(signed long long, const float *); + LXVW4X_V4SF LXVW4X_F + vd __builtin_vec_vsx_ld (signed long long, const vd *); + LXVD2X_V2DF LXVD2X_VD + vd __builtin_vec_vsx_ld (signed long long, const double *); + LXVD2X_V2DF LXVD2X_D + +[VEC_XL_BE, vec_xl_be, __builtin_vec_xl_be, __VSX__] + vsc __builtin_vec_xl_be (signed long long, const vsc *); + LD_ELEMREV_V16QI LD_ELEMREV_VSC + vsc __builtin_vec_xl_be (signed long long, const signed char *); + LD_ELEMREV_V16QI LD_ELEMREV_SC + vuc __builtin_vec_xl_be (signed long long, const vuc *); + LD_ELEMREV_V16QI LD_ELEMREV_VUC + vuc __builtin_vec_xl_be (signed long long, const unsigned char *); + LD_ELEMREV_V16QI LD_ELEMREV_UC + vss __builtin_vec_xl_be (signed long long, const vss *); + LD_ELEMREV_V8HI LD_ELEMREV_VSS + vss __builtin_vec_xl_be (signed long long, const signed short *); + LD_ELEMREV_V8HI LD_ELEMREV_SS + vus __builtin_vec_xl_be (signed long long, const vus *); + LD_ELEMREV_V8HI LD_ELEMREV_VUS + vus __builtin_vec_xl_be (signed long long, const unsigned short *); + LD_ELEMREV_V8HI LD_ELEMREV_US + vsi __builtin_vec_xl_be (signed long long, const vsi *); + LD_ELEMREV_V4SI LD_ELEMREV_VSI + vsi __builtin_vec_xl_be (signed long long, const signed int *); + LD_ELEMREV_V4SI LD_ELEMREV_SI + vui __builtin_vec_xl_be (signed long long, const vui *); + LD_ELEMREV_V4SI LD_ELEMREV_VUI + vui __builtin_vec_xl_be (signed long long, const unsigned int *); + LD_ELEMREV_V4SI LD_ELEMREV_UI + vsll __builtin_vec_xl_be (signed long long, const vsll *); + LD_ELEMREV_V2DI LD_ELEMREV_VSLL + vsll __builtin_vec_xl_be (signed long long, const signed long long *); + LD_ELEMREV_V2DI LD_ELEMREV_SLL + vull __builtin_vec_xl_be (signed long long, const vull *); + LD_ELEMREV_V2DI LD_ELEMREV_VULL + vull __builtin_vec_xl_be (signed long long, const unsigned long long *); + LD_ELEMREV_V2DI LD_ELEMREV_ULL + vsq __builtin_vec_xl_be (signed long long, const signed __int128 *); + LD_ELEMREV_V1TI LD_ELEMREV_SQ + vuq __builtin_vec_xl_be (signed long long, const unsigned __int128 *); + LD_ELEMREV_V1TI LD_ELEMREV_UQ + vf __builtin_vec_xl_be (signed long long, const vf *); + LD_ELEMREV_V4SF LD_ELEMREV_VF + vf __builtin_vec_xl_be (signed long long, const float *); + LD_ELEMREV_V4SF LD_ELEMREV_F + vd __builtin_vec_xl_be (signed long long, const vd *); + LD_ELEMREV_V2DF LD_ELEMREV_VD + vd __builtin_vec_xl_be (signed long long, const double *); + LD_ELEMREV_V2DF LD_ELEMREV_DD + +[VEC_XL_LEN_R, vec_xl_len_r, __builtin_vec_xl_len_r, _ARCH_PPC64_PWR9] + vuc __builtin_vsx_xl_len_r (const unsigned char *, unsigned int); + XL_LEN_R + +[VEC_XL_SEXT, vec_xl_sext, __builtin_vec_xl_sext, _ARCH_PWR10] + vsq __builtin_vec_xl_sext (signed long long, const signed char *); + SE_LXVRBX + vsq __builtin_vec_xl_sext (signed long long, const signed short *); + SE_LXVRHX + vsq __builtin_vec_xl_sext (signed long long, const signed int *); + SE_LXVRWX + vsq __builtin_vec_xl_sext (signed long long, const signed long long *); + SE_LXVRDX + +[VEC_XL_ZEXT, vec_xl_zext, __builtin_vec_xl_zext, _ARCH_PWR10] + vuq __builtin_vec_xl_zext (signed long long, const unsigned char *); + ZE_LXVRBX + vuq __builtin_vec_xl_zext (signed long long, const unsigned short *); + ZE_LXVRHX + vuq __builtin_vec_xl_zext (signed long long, const unsigned int *); + ZE_LXVRWX + vuq __builtin_vec_xl_zext (signed long long, const unsigned long long *); + ZE_LXVRDX + +[VEC_XOR, vec_xor, __builtin_vec_xor] + vsc __builtin_vec_xor (vsc, vsc); + VXOR_V16QI + vuc __builtin_vec_xor (vuc, vuc); + VXOR_V16QI_UNS VXOR_VUC + vbc __builtin_vec_xor (vbc, vbc); + 
VXOR_V16QI_UNS VXOR_VBC + vss __builtin_vec_xor (vss, vss); + VXOR_V8HI + vus __builtin_vec_xor (vus, vus); + VXOR_V8HI_UNS VXOR_VUS + vbs __builtin_vec_xor (vbs, vbs); + VXOR_V8HI_UNS VXOR_VBS + vsi __builtin_vec_xor (vsi, vsi); + VXOR_V4SI + vui __builtin_vec_xor (vui, vui); + VXOR_V4SI_UNS VXOR_VUI + vbi __builtin_vec_xor (vbi, vbi); + VXOR_V4SI_UNS VXOR_VBI + vsll __builtin_vec_xor (vsll, vsll); + VXOR_V2DI + vull __builtin_vec_xor (vull, vull); + VXOR_V2DI_UNS VXOR_VULL + vbll __builtin_vec_xor (vbll, vbll); + VXOR_V2DI_UNS VXOR_VBLL + vf __builtin_vec_xor (vf, vf); + VXOR_V4SF + vd __builtin_vec_xor (vd, vd); + VXOR_V2DF +; The following variants are deprecated. + vsc __builtin_vec_xor (vsc, vbc); + VXOR_V16QI VXOR_VSC_VBC + vsc __builtin_vec_xor (vbc, vsc); + VXOR_V16QI VXOR_VBC_VSC + vsc __builtin_vec_xor (vsc, vuc); + VXOR_V16QI VXOR_VSC_VUC + vuc __builtin_vec_xor (vuc, vbc); + VXOR_V16QI_UNS VXOR_VUC_VBC + vuc __builtin_vec_xor (vbc, vuc); + VXOR_V16QI_UNS VXOR_VBC_VUC + vuc __builtin_vec_xor (vuc, vsc); + VXOR_V16QI_UNS VXOR_VUC_VSC + vss __builtin_vec_xor (vss, vbs); + VXOR_V8HI VXOR_VSS_VBS + vss __builtin_vec_xor (vbs, vss); + VXOR_V8HI VXOR_VBS_VSS + vus __builtin_vec_xor (vus, vbs); + VXOR_V8HI_UNS VXOR_VUS_VBS + vus __builtin_vec_xor (vbs, vus); + VXOR_V8HI_UNS VXOR_VBS_VUS + vsi __builtin_vec_xor (vsi, vbi); + VXOR_V4SI VXOR_VSI_VBI + vsi __builtin_vec_xor (vbi, vsi); + VXOR_V4SI VXOR_VBI_VSI + vui __builtin_vec_xor (vui, vbi); + VXOR_V4SI_UNS VXOR_VUI_VBI + vui __builtin_vec_xor (vbi, vui); + VXOR_V4SI_UNS VXOR_VBI_VUI + vsll __builtin_vec_xor (vsll, vbll); + VXOR_V2DI VXOR_VSLL_VBLL + vsll __builtin_vec_xor (vbll, vsll); + VXOR_V2DI VXOR_VBLL_VSLL + vull __builtin_vec_xor (vull, vbll); + VXOR_V2DI_UNS VXOR_VULL_VBLL + vull __builtin_vec_xor (vbll, vull); + VXOR_V2DI_UNS VXOR_VBLL_VULL + vf __builtin_vec_xor (vf, vbi); + VXOR_V4SF VXOR_VF_VBI + vf __builtin_vec_xor (vbi, vf); + VXOR_V4SF VXOR_VBI_VF + vd __builtin_vec_xor (vd, vbll); + VXOR_V2DF VXOR_VD_VBLL + vd __builtin_vec_xor (vbll, vd); + VXOR_V2DF VXOR_VBLL_VD + +[VEC_XST, vec_xst, __builtin_vec_vsx_st, __VSX__] + void __builtin_vec_vsx_st (vsc, signed long long, vsc *); + STXVW4X_V16QI STXVW4X_VSC + void __builtin_vec_vsx_st (vsc, signed long long, signed char *); + STXVW4X_V16QI STXVW4X_SC + void __builtin_vec_vsx_st (vuc, signed long long, vuc *); + STXVW4X_V16QI STXVW4X_VUC + void __builtin_vec_vsx_st (vuc, signed long long, unsigned char *); + STXVW4X_V16QI STXVW4X_UC + void __builtin_vec_vsx_st (vbc, signed long long, vbc *); + STXVW4X_V16QI STXVW4X_VBC + void __builtin_vec_vsx_st (vbc, signed long long, signed char *); + STXVW4X_V16QI STXVW4X_VBC_S + void __builtin_vec_vsx_st (vbc, signed long long, unsigned char *); + STXVW4X_V16QI STXVW4X_VBC_U + void __builtin_vec_vsx_st (vss, signed long long, vss *); + STXVW4X_V8HI STXVW4X_VSS + void __builtin_vec_vsx_st (vss, signed long long, signed short *); + STXVW4X_V8HI STXVW4X_SS + void __builtin_vec_vsx_st (vus, signed long long, vus *); + STXVW4X_V8HI STXVW4X_VUS + void __builtin_vec_vsx_st (vus, signed long long, unsigned short *); + STXVW4X_V8HI STXVW4X_US + void __builtin_vec_vsx_st (vbs, signed long long, vbs *); + STXVW4X_V8HI STXVW4X_VBS + void __builtin_vec_vsx_st (vbs, signed long long, signed short *); + STXVW4X_V8HI STXVW4X_VBS_S + void __builtin_vec_vsx_st (vbs, signed long long, unsigned short *); + STXVW4X_V8HI STXVW4X_VBS_U + void __builtin_vec_vsx_st (vp, signed long long, vp *); + STXVW4X_V8HI STXVW4X_VP + void __builtin_vec_vsx_st (vsi, 
signed long long, vsi *); + STXVW4X_V4SI STXVW4X_VSI + void __builtin_vec_vsx_st (vsi, signed long long, signed int *); + STXVW4X_V4SI STXVW4X_SI + void __builtin_vec_vsx_st (vui, signed long long, vui *); + STXVW4X_V4SI STXVW4X_VUI + void __builtin_vec_vsx_st (vui, signed long long, unsigned int *); + STXVW4X_V4SI STXVW4X_UI + void __builtin_vec_vsx_st (vbi, signed long long, vbi *); + STXVW4X_V4SI STXVW4X_VBI + void __builtin_vec_vsx_st (vbi, signed long long, signed int *); + STXVW4X_V4SI STXVW4X_VBI_S + void __builtin_vec_vsx_st (vbi, signed long long, unsigned int *); + STXVW4X_V4SI STXVW4X_VBI_U + void __builtin_vec_vsx_st (vsll, signed long long, vsll *); + STXVD2X_V2DI STXVD2X_VSLL + void __builtin_vec_vsx_st (vsll, signed long long, signed long long *); + STXVD2X_V2DI STXVD2X_SLL + void __builtin_vec_vsx_st (vull, signed long long, vull *); + STXVD2X_V2DI STXVD2X_VULL + void __builtin_vec_vsx_st (vull, signed long long, unsigned long long *); + STXVD2X_V2DI STXVD2X_ULL + void __builtin_vec_vsx_st (vbll, signed long long, vbll *); + STXVD2X_V2DI STXVD2X_VBLL + void __builtin_vec_vsx_st (vsq, signed long long, signed __int128 *); + STXVD2X_V1TI STXVD2X_SQ + void __builtin_vec_vsx_st (vuq, signed long long, unsigned __int128 *); + STXVD2X_V1TI STXVD2X_UQ + void __builtin_vec_vsx_st (vf, signed long long, vf *); + STXVW4X_V4SF STXVW4X_VF + void __builtin_vec_vsx_st (vf, signed long long, float *); + STXVW4X_V4SF STXVW4X_F + void __builtin_vec_vsx_st (vd, signed long long, vd *); + STXVD2X_V2DF STXVD2X_VD + void __builtin_vec_vsx_st (vd, signed long long, double *); + STXVD2X_V2DF STXVD2X_D + +[VEC_XST_BE, vec_xst_be, __builtin_vec_xst_be, __VSX__] + void __builtin_vec_xst_be (vsc, signed long long, vsc *); + ST_ELEMREV_V16QI ST_ELEMREV_VSC + void __builtin_vec_xst_be (vsc, signed long long, signed char *); + ST_ELEMREV_V16QI ST_ELEMREV_SC + void __builtin_vec_xst_be (vuc, signed long long, vuc *); + ST_ELEMREV_V16QI ST_ELEMREV_VUC + void __builtin_vec_xst_be (vuc, signed long long, unsigned char *); + ST_ELEMREV_V16QI ST_ELEMREV_UC + void __builtin_vec_xst_be (vss, signed long long, vss *); + ST_ELEMREV_V8HI ST_ELEMREV_VSS + void __builtin_vec_xst_be (vss, signed long long, signed short *); + ST_ELEMREV_V8HI ST_ELEMREV_SS + void __builtin_vec_xst_be (vus, signed long long, vus *); + ST_ELEMREV_V8HI ST_ELEMREV_VUS + void __builtin_vec_xst_be (vus, signed long long, unsigned short *); + ST_ELEMREV_V8HI ST_ELEMREV_US + void __builtin_vec_xst_be (vsi, signed long long, vsi *); + ST_ELEMREV_V4SI ST_ELEMREV_VSI + void __builtin_vec_xst_be (vsi, signed long long, signed int *); + ST_ELEMREV_V4SI ST_ELEMREV_SI + void __builtin_vec_xst_be (vui, signed long long, vui *); + ST_ELEMREV_V4SI ST_ELEMREV_VUI + void __builtin_vec_xst_be (vui, signed long long, unsigned int *); + ST_ELEMREV_V4SI ST_ELEMREV_UI + void __builtin_vec_xst_be (vsll, signed long long, vsll *); + ST_ELEMREV_V2DI ST_ELEMREV_VSLL + void __builtin_vec_xst_be (vsll, signed long long, signed long long *); + ST_ELEMREV_V2DI ST_ELEMREV_SLL + void __builtin_vec_xst_be (vull, signed long long, vull *); + ST_ELEMREV_V2DI ST_ELEMREV_VULL + void __builtin_vec_xst_be (vull, signed long long, unsigned long long *); + ST_ELEMREV_V2DI ST_ELEMREV_ULL + void __builtin_vec_xst_be (vsq, signed long long, signed __int128 *); + ST_ELEMREV_V1TI ST_ELEMREV_SQ + void __builtin_vec_xst_be (vuq, signed long long, unsigned __int128 *); + ST_ELEMREV_V1TI ST_ELEMREV_UQ + void __builtin_vec_xst_be (vf, signed long long, vf *); + ST_ELEMREV_V4SF
ST_ELEMREV_VF + void __builtin_vec_xst_be (vf, signed long long, float *); + ST_ELEMREV_V4SF ST_ELEMREV_F + void __builtin_vec_xst_be (vd, signed long long, vd *); + ST_ELEMREV_V2DF ST_ELEMREV_VD + void __builtin_vec_xst_be (vd, signed long long, double *); + ST_ELEMREV_V2DF ST_ELEMREV_D + +[VEC_XST_LEN_R, vec_xst_len_r, __builtin_vec_xst_len_r, _ARCH_PPC64_PWR9] + void __builtin_vsx_xst_len_r (vuc, unsigned char *, unsigned int); + XST_LEN_R + +[VEC_XST_TRUNC, vec_xst_trunc, __builtin_vec_xst_trunc, _ARCH_PWR10] + void __builtin_vec_xst_trunc (vsq, signed long long, signed char *); + TR_STXVRBX TR_STXVRBX_S + void __builtin_vec_xst_trunc (vuq, signed long long, unsigned char *); + TR_STXVRBX TR_STXVRBX_U + void __builtin_vec_xst_trunc (vsq, signed long long, signed short *); + TR_STXVRHX TR_STXVRHX_S + void __builtin_vec_xst_trunc (vuq, signed long long, unsigned short *); + TR_STXVRHX TR_STXVRHX_U + void __builtin_vec_xst_trunc (vsq, signed long long, signed int *); + TR_STXVRWX TR_STXVRWX_S + void __builtin_vec_xst_trunc (vuq, signed long long, unsigned int *); + TR_STXVRWX TR_STXVRWX_U + void __builtin_vec_xst_trunc (vsq, signed long long, signed long long *); + TR_STXVRDX TR_STXVRDX_S + void __builtin_vec_xst_trunc (vuq, signed long long, unsigned long long *); + TR_STXVRDX TR_STXVRDX_U + +[VEC_XXPERMDI, vec_xxpermdi, __builtin_vsx_xxpermdi, __VSX__] + vsc __builtin_vsx_xxpermdi (vsc, vsc, const int); + XXPERMDI_16QI XXPERMDI_VSC + vuc __builtin_vsx_xxpermdi (vuc, vuc, const int); + XXPERMDI_16QI XXPERMDI_VUC + vss __builtin_vsx_xxpermdi (vss, vss, const int); + XXPERMDI_8HI XXPERMDI_VSS + vus __builtin_vsx_xxpermdi (vus, vus, const int); + XXPERMDI_8HI XXPERMDI_VUS + vsi __builtin_vsx_xxpermdi (vsi, vsi, const int); + XXPERMDI_4SI XXPERMDI_VSI + vui __builtin_vsx_xxpermdi (vui, vui, const int); + XXPERMDI_4SI XXPERMDI_VUI + vsll __builtin_vsx_xxpermdi (vsll, vsll, const int); + XXPERMDI_2DI XXPERMDI_VSLL + vull __builtin_vsx_xxpermdi (vull, vull, const int); + XXPERMDI_2DI XXPERMDI_VULL + vf __builtin_vsx_xxpermdi (vf, vf, const int); + XXPERMDI_4SF XXPERMDI_VF + vd __builtin_vsx_xxpermdi (vd, vd, const int); + XXPERMDI_2DF XXPERMDI_VD + +[VEC_XXSLDWI, vec_xxsldwi, __builtin_vsx_xxsldwi, __VSX__] + vsc __builtin_vsx_xxsldwi (vsc, vsc, const int); + XXSLDWI_16QI XXSLDWI_VSC2 + vuc __builtin_vsx_xxsldwi (vuc, vuc, const int); + XXSLDWI_16QI XXSLDWI_VUC2 + vss __builtin_vsx_xxsldwi (vss, vss, const int); + XXSLDWI_8HI XXSLDWI_VSS2 + vus __builtin_vsx_xxsldwi (vus, vus, const int); + XXSLDWI_8HI XXSLDWI_VUS2 + vsi __builtin_vsx_xxsldwi (vsi, vsi, const int); + XXSLDWI_4SI XXSLDWI_VSI2 + vui __builtin_vsx_xxsldwi (vui, vui, const int); + XXSLDWI_4SI XXSLDWI_VUI2 + vsll __builtin_vsx_xxsldwi (vsll, vsll, const int); + XXSLDWI_2DI XXSLDWI_VSLL2 + vull __builtin_vsx_xxsldwi (vull, vull, const int); + XXSLDWI_2DI XXSLDWI_VULL2 + vf __builtin_vsx_xxsldwi (vf, vf, const int); + XXSLDWI_4SF XXSLDWI_VF2 + vd __builtin_vsx_xxsldwi (vd, vd, const int); + XXSLDWI_2DF XXSLDWI_VD2 + + +; ************************************************************************** +; ************************************************************************** +; **** Deprecated overloads that should never have existed at all **** +; ************************************************************************** +; ************************************************************************** + +[VEC_LVEBX, vec_lvebx, __builtin_vec_lvebx] + vsc __builtin_vec_lvebx (signed long, signed char *); + LVEBX LVEBX_DEPR1 + vuc 
__builtin_vec_lvebx (signed long, unsigned char *); + LVEBX LVEBX_DEPR2 + +[VEC_LVEHX, vec_lvehx, __builtin_vec_lvehx] + vss __builtin_vec_lvehx (signed long, signed short *); + LVEHX LVEHX_DEPR1 + vus __builtin_vec_lvehx (signed long, unsigned short *); + LVEHX LVEHX_DEPR2 + +[VEC_LVEWX, vec_lvewx, __builtin_vec_lvewx] + vf __builtin_vec_lvewx (signed long, float *); + LVEWX LVEWX_DEPR1 + vsi __builtin_vec_lvewx (signed long, signed int *); + LVEWX LVEWX_DEPR2 + vui __builtin_vec_lvewx (signed long, unsigned int *); + LVEWX LVEWX_DEPR3 + vsi __builtin_vec_lvewx (signed long, signed long *); + LVEWX LVEWX_DEPR4 + vui __builtin_vec_lvewx (signed long, unsigned long *); + LVEWX LVEWX_DEPR5 + +[VEC_STVEBX, vec_stvebx, __builtin_vec_stvebx] + void __builtin_vec_stvebx (vsc, signed long, signed char *); + STVEBX STVEBX_DEPR1 + void __builtin_vec_stvebx (vuc, signed long, unsigned char *); + STVEBX STVEBX_DEPR2 + void __builtin_vec_stvebx (vbc, signed long, signed char *); + STVEBX STVEBX_DEPR3 + void __builtin_vec_stvebx (vbc, signed long, unsigned char *); + STVEBX STVEBX_DEPR4 + void __builtin_vec_stvebx (vsc, signed long, void *); + STVEBX STVEBX_DEPR5 + void __builtin_vec_stvebx (vuc, signed long, void *); + STVEBX STVEBX_DEPR6 + +[VEC_STVEHX, vec_stvehx, __builtin_vec_stvehx] + void __builtin_vec_stvehx (vss, signed long, signed short *); + STVEHX STVEHX_DEPR1 + void __builtin_vec_stvehx (vus, signed long, unsigned short *); + STVEHX STVEHX_DEPR2 + void __builtin_vec_stvehx (vbs, signed long, signed short *); + STVEHX STVEHX_DEPR3 + void __builtin_vec_stvehx (vbs, signed long, unsigned short *); + STVEHX STVEHX_DEPR4 + void __builtin_vec_stvehx (vss, signed long, void *); + STVEHX STVEHX_DEPR5 + void __builtin_vec_stvehx (vus, signed long, void *); + STVEHX STVEHX_DEPR6 + +[VEC_STVEWX, vec_stvewx, __builtin_vec_stvewx] + void __builtin_vec_stvewx (vf, signed long, float *); + STVEWX STVEWX_DEPR1 + void __builtin_vec_stvewx (vsi, signed long, signed int *); + STVEWX STVEWX_DEPR2 + void __builtin_vec_stvewx (vui, signed long, unsigned int *); + STVEWX STVEWX_DEPR3 + void __builtin_vec_stvewx (vbi, signed long, signed int *); + STVEWX STVEWX_DEPR4 + void __builtin_vec_stvewx (vbi, signed long, unsigned int *); + STVEWX STVEWX_DEPR5 + void __builtin_vec_stvewx (vf, signed long, void *); + STVEWX STVEWX_DEPR6 + void __builtin_vec_stvewx (vsi, signed long, void *); + STVEWX STVEWX_DEPR7 + void __builtin_vec_stvewx (vui, signed long, void *); + STVEWX STVEWX_DEPR8 + +[VEC_TSTSFI_EQ_DD, SKIP, __builtin_dfp_dtstsfi_eq_dd, _ARCH_PWR9] + signed int __builtin_dfp_dtstsfi_eq_dd (const int, _Decimal64); + TSTSFI_EQ_DD TSTSFI_EQ_DD_DEPR1 + +[VEC_TSTSFI_EQ_TD, SKIP, __builtin_dfp_dtstsfi_eq_td, _ARCH_PWR9] + signed int __builtin_dfp_dtstsfi_eq_td (const int, _Decimal128); + TSTSFI_EQ_TD TSTSFI_EQ_TD_DEPR1 + +[VEC_TSTSFI_GT_DD, SKIP, __builtin_dfp_dtstsfi_gt_dd, _ARCH_PWR9] + signed int __builtin_dfp_dtstsfi_gt_dd (const int, _Decimal64); + TSTSFI_GT_DD TSTSFI_GT_DD_DEPR1 + +[VEC_TSTSFI_GT_TD, SKIP, __builtin_dfp_dtstsfi_gt_td, _ARCH_PWR9] + signed int __builtin_dfp_dtstsfi_gt_td (const int, _Decimal128); + TSTSFI_GT_TD TSTSFI_GT_TD_DEPR1 + +[VEC_TSTSFI_LT_DD, SKIP, __builtin_dfp_dtstsfi_lt_dd, _ARCH_PWR9] + signed int __builtin_dfp_dtstsfi_lt_dd (const int, _Decimal64); + TSTSFI_LT_DD TSTSFI_LT_DD_DEPR1 + +[VEC_TSTSFI_LT_TD, SKIP, __builtin_dfp_dtstsfi_lt_td, _ARCH_PWR9] + signed int __builtin_dfp_dtstsfi_lt_td (const int, _Decimal128); + TSTSFI_LT_TD TSTSFI_LT_TD_DEPR1 + +[VEC_TSTSFI_OV_DD, SKIP,
__builtin_dfp_dtstsfi_ov_dd, _ARCH_PWR9] + signed int __builtin_dfp_dtstsfi_ov_dd (const int, _Decimal64); + TSTSFI_OV_DD TSTSFI_OV_DD_DEPR1 + +[VEC_TSTSFI_OV_TD, SKIP, __builtin_dfp_dtstsfi_ov_td, _ARCH_PWR9] + signed int __builtin_dfp_dtstsfi_ov_td (const int, _Decimal128); + TSTSFI_OV_TD TSTSFI_OV_TD_DEPR1 + +[VEC_VADDCUQ, vec_vaddcuq, __builtin_vec_vaddcuq, _ARCH_PWR8] + vsq __builtin_vec_vaddcuq (vsq, vsq); + VADDCUQ VADDCUQ_DEPR1 + vuq __builtin_vec_vaddcuq (vuq, vuq); + VADDCUQ VADDCUQ_DEPR2 + +[VEC_VADDECUQ, vec_vaddecuq, __builtin_vec_vaddecuq, _ARCH_PWR8] + vsq __builtin_vec_vaddecuq (vsq, vsq, vsq); + VADDECUQ VADDECUQ_DEPR1 + vuq __builtin_vec_vaddecuq (vuq, vuq, vuq); + VADDECUQ VADDECUQ_DEPR2 + +[VEC_VADDEUQM, vec_vaddeuqm, __builtin_vec_vaddeuqm, _ARCH_PWR8] + vsq __builtin_vec_vaddeuqm (vsq, vsq, vsq); + VADDEUQM VADDEUQM_DEPR1 + vuq __builtin_vec_vaddeuqm (vuq, vuq, vuq); + VADDEUQM VADDEUQM_DEPR2 + +[VEC_VADDFP, vec_vaddfp, __builtin_vec_vaddfp] + vf __builtin_vec_vaddfp (vf, vf); + VADDFP VADDFP_DEPR1 + +[VEC_VADDSBS, vec_vaddsbs, __builtin_vec_vaddsbs] + vsc __builtin_vec_vaddsbs (vsc, vsc); + VADDSBS VADDSBS_DEPR1 + vsc __builtin_vec_vaddsbs (vbc, vsc); + VADDSBS VADDSBS_DEPR2 + vsc __builtin_vec_vaddsbs (vsc, vbc); + VADDSBS VADDSBS_DEPR3 + +[VEC_VADDSHS, vec_vaddshs, __builtin_vec_vaddshs] + vss __builtin_vec_vaddshs (vss, vss); + VADDSHS VADDSHS_DEPR1 + vss __builtin_vec_vaddshs (vbs, vss); + VADDSHS VADDSHS_DEPR2 + vss __builtin_vec_vaddshs (vss, vbs); + VADDSHS VADDSHS_DEPR3 + +[VEC_VADDSWS, vec_vaddsws, __builtin_vec_vaddsws] + vsi __builtin_vec_vaddsws (vsi, vsi); + VADDSWS VADDSWS_DEPR1 + vsi __builtin_vec_vaddsws (vbi, vsi); + VADDSWS VADDSWS_DEPR2 + vsi __builtin_vec_vaddsws (vsi, vbi); + VADDSWS VADDSWS_DEPR3 + +[VEC_VADDUBM, vec_vaddubm, __builtin_vec_vaddubm] + vsc __builtin_vec_vaddubm (vsc, vsc); + VADDUBM VADDUBM_DEPR1 + vuc __builtin_vec_vaddubm (vsc, vuc); + VADDUBM VADDUBM_DEPR2 + vuc __builtin_vec_vaddubm (vuc, vsc); + VADDUBM VADDUBM_DEPR3 + vuc __builtin_vec_vaddubm (vuc, vuc); + VADDUBM VADDUBM_DEPR4 + vsc __builtin_vec_vaddubm (vbc, vsc); + VADDUBM VADDUBM_DEPR5 + vsc __builtin_vec_vaddubm (vsc, vbc); + VADDUBM VADDUBM_DEPR6 + vuc __builtin_vec_vaddubm (vbc, vuc); + VADDUBM VADDUBM_DEPR7 + vuc __builtin_vec_vaddubm (vuc, vbc); + VADDUBM VADDUBM_DEPR8 + +[VEC_VADDUBS, vec_vaddubs, __builtin_vec_vaddubs] + vuc __builtin_vec_vaddubs (vsc, vuc); + VADDUBS VADDUBS_DEPR1 + vuc __builtin_vec_vaddubs (vuc, vsc); + VADDUBS VADDUBS_DEPR2 + vuc __builtin_vec_vaddubs (vuc, vuc); + VADDUBS VADDUBS_DEPR3 + vuc __builtin_vec_vaddubs (vbc, vuc); + VADDUBS VADDUBS_DEPR4 + vuc __builtin_vec_vaddubs (vuc, vbc); + VADDUBS VADDUBS_DEPR5 + +[VEC_VADDUDM, vec_vaddudm, __builtin_vec_vaddudm, _ARCH_PWR8] + vsll __builtin_vec_vaddudm (vbll, vsll); + VADDUDM VADDUDM_DEPR1 + vsll __builtin_vec_vaddudm (vsll, vbll); + VADDUDM VADDUDM_DEPR2 + vsll __builtin_vec_vaddudm (vsll, vsll); + VADDUDM VADDUDM_DEPR3 + vull __builtin_vec_vaddudm (vbll, vull); + VADDUDM VADDUDM_DEPR4 + vull __builtin_vec_vaddudm (vull, vbll); + VADDUDM VADDUDM_DEPR5 + vull __builtin_vec_vaddudm (vull, vull); + VADDUDM VADDUDM_DEPR6 + +[VEC_VADDUHM, vec_vadduhm, __builtin_vec_vadduhm] + vss __builtin_vec_vadduhm (vss, vss); + VADDUHM VADDUHM_DEPR1 + vus __builtin_vec_vadduhm (vss, vus); + VADDUHM VADDUHM_DEPR2 + vus __builtin_vec_vadduhm (vus, vss); + VADDUHM VADDUHM_DEPR3 + vus __builtin_vec_vadduhm (vus, vus); + VADDUHM VADDUHM_DEPR4 + vss __builtin_vec_vadduhm (vbs, vss); + VADDUHM VADDUHM_DEPR5 + vss 
__builtin_vec_vadduhm (vss, vbs); + VADDUHM VADDUHM_DEPR6 + vus __builtin_vec_vadduhm (vbs, vus); + VADDUHM VADDUHM_DEPR7 + vus __builtin_vec_vadduhm (vus, vbs); + VADDUHM VADDUHM_DEPR8 + +[VEC_VADDUHS, vec_vadduhs, __builtin_vec_vadduhs] + vus __builtin_vec_vadduhs (vss, vus); + VADDUHS VADDUHS_DEPR1 + vus __builtin_vec_vadduhs (vus, vss); + VADDUHS VADDUHS_DEPR2 + vus __builtin_vec_vadduhs (vus, vus); + VADDUHS VADDUHS_DEPR3 + vus __builtin_vec_vadduhs (vbs, vus); + VADDUHS VADDUHS_DEPR4 + vus __builtin_vec_vadduhs (vus, vbs); + VADDUHS VADDUHS_DEPR5 + +[VEC_VADDUQM, vec_vadduqm, __builtin_vec_vadduqm, _ARCH_PWR8] + vsq __builtin_vec_vadduqm (vsq, vsq); + VADDUQM VADDUQM_DEPR1 + vuq __builtin_vec_vadduqm (vuq, vuq); + VADDUQM VADDUQM_DEPR2 + +[VEC_VADDUWM, vec_vadduwm, __builtin_vec_vadduwm] + vsi __builtin_vec_vadduwm (vsi, vsi); + VADDUWM VADDUWM_DEPR1 + vui __builtin_vec_vadduwm (vsi, vui); + VADDUWM VADDUWM_DEPR2 + vui __builtin_vec_vadduwm (vui, vsi); + VADDUWM VADDUWM_DEPR3 + vui __builtin_vec_vadduwm (vui, vui); + VADDUWM VADDUWM_DEPR4 + vsi __builtin_vec_vadduwm (vbi, vsi); + VADDUWM VADDUWM_DEPR5 + vsi __builtin_vec_vadduwm (vsi, vbi); + VADDUWM VADDUWM_DEPR6 + vui __builtin_vec_vadduwm (vbi, vui); + VADDUWM VADDUWM_DEPR7 + vui __builtin_vec_vadduwm (vui, vbi); + VADDUWM VADDUWM_DEPR8 + +[VEC_VADDUWS, vec_vadduws, __builtin_vec_vadduws] + vui __builtin_vec_vadduws (vsi, vui); + VADDUWS VADDUWS_DEPR1 + vui __builtin_vec_vadduws (vui, vsi); + VADDUWS VADDUWS_DEPR2 + vui __builtin_vec_vadduws (vui, vui); + VADDUWS VADDUWS_DEPR3 + vui __builtin_vec_vadduws (vbi, vui); + VADDUWS VADDUWS_DEPR4 + vui __builtin_vec_vadduws (vui, vbi); + VADDUWS VADDUWS_DEPR5 + +[VEC_VADUB, vec_absdb, __builtin_vec_vadub] + vuc __builtin_vec_vadub (vuc, vuc); + VADUB VADUB_DEPR1 + +[VEC_VADUH, vec_absdh, __builtin_vec_vaduh] + vus __builtin_vec_vaduh (vus, vus); + VADUH VADUH_DEPR1 + +[VEC_VADUW, vec_absdw, __builtin_vec_vaduw] + vui __builtin_vec_vaduw (vui, vui); + VADUW VADUW_DEPR1 + +[VEC_VAVGSB, vec_vavgsb, __builtin_vec_vavgsb] + vsc __builtin_vec_vavgsb (vsc, vsc); + VAVGSB VAVGSB_DEPR1 + +[VEC_VAVGSH, vec_vavgsh, __builtin_vec_vavgsh] + vss __builtin_vec_vavgsh (vss, vss); + VAVGSH VAVGSH_DEPR1 + +[VEC_VAVGSW, vec_vavgsw, __builtin_vec_vavgsw] + vsi __builtin_vec_vavgsw (vsi, vsi); + VAVGSW VAVGSW_DEPR1 + +[VEC_VAVGUB, vec_vavgub, __builtin_vec_vavgub] + vuc __builtin_vec_vavgub (vuc, vuc); + VAVGUB VAVGUB_DEPR1 + +[VEC_VAVGUH, vec_vavguh, __builtin_vec_vavguh] + vus __builtin_vec_vavguh (vus, vus); + VAVGUH VAVGUH_DEPR1 + +[VEC_VAVGUW, vec_vavguw, __builtin_vec_vavguw] + vui __builtin_vec_vavguw (vui, vui); + VAVGUW VAVGUW_DEPR1 + +[VEC_VBPERMQ, vec_vbpermq, __builtin_vec_vbpermq, _ARCH_PWR8] + vull __builtin_vec_vbpermq (vull, vuc); + VBPERMQ VBPERMQ_DEPR1 + vsll __builtin_vec_vbpermq (vsc, vsc); + VBPERMQ VBPERMQ_DEPR2 + vull __builtin_vec_vbpermq (vuc, vuc); + VBPERMQ VBPERMQ_DEPR3 + vull __builtin_vec_vbpermq (vuq, vuc); + VBPERMQ VBPERMQ_DEPR4 + +[VEC_VCFSX, vec_vcfsx, __builtin_vec_vcfsx] + vf __builtin_vec_vcfsx (vsi, const int); + VCFSX VCFSX_DEPR1 + +[VEC_VCFUX, vec_vcfux, __builtin_vec_vcfux] + vf __builtin_vec_vcfux (vui, const int); + VCFUX VCFUX_DEPR1 + +[VEC_VCLZB, vec_vclzb, __builtin_vec_vclzb, _ARCH_PWR8] + vsc __builtin_vec_vclzb (vsc); + VCLZB VCLZB_DEPR1 + vuc __builtin_vec_vclzb (vuc); + VCLZB VCLZB_DEPR2 + +[VEC_VCLZD, vec_vclzd, __builtin_vec_vclzd, _ARCH_PWR8] + vsll __builtin_vec_vclzd (vsll); + VCLZD VCLZD_DEPR1 + vull __builtin_vec_vclzd (vull); + VCLZD VCLZD_DEPR2 + 
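
For reference, a minimal usage sketch of how the deprecated per-width spellings in the stanzas above reach user code (assumptions: GCC with <altivec.h> and a Power8-capable target such as -mcpu=power8; vec_cntlz is the preferred generic overload, as registered elsewhere in this file):

#include <altivec.h>

/* Count leading zeros per doubleword element.  The deprecated
   vec_vclzd spelling resolves through the VEC_VCLZD stanza above to
   the VCLZD instance builtin; the generic vec_cntlz overload reaches
   the same vclzd instruction and is the supported spelling.  */
vector unsigned long long
clz_doublewords (vector unsigned long long v)
{
  vector unsigned long long old_spelling = vec_vclzd (v);  /* deprecated */
  vector unsigned long long generic = vec_cntlz (v);       /* preferred */
  (void) old_spelling;
  return generic;
}
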
+[VEC_VCLZH, vec_vclzh, __builtin_vec_vclzh, _ARCH_PWR8] + vss __builtin_vec_vclzh (vss); + VCLZH VCLZH_DEPR1 + vus __builtin_vec_vclzh (vus); + VCLZH VCLZH_DEPR2 + +[VEC_VCLZW, vec_vclzw, __builtin_vec_vclzw, _ARCH_PWR8] + vsi __builtin_vec_vclzw (vsi); + VCLZW VCLZW_DEPR1 + vui __builtin_vec_vclzw (vui); + VCLZW VCLZW_DEPR2 + +[VEC_VCMPEQFP, vec_vcmpeqfp, __builtin_vec_vcmpeqfp] + vbi __builtin_vec_vcmpeqfp (vf, vf); + VCMPEQFP VCMPEQFP_DEPR1 + +[VEC_VCMPEQUB, vec_vcmpequb, __builtin_vec_vcmpequb] + vbc __builtin_vec_vcmpequb (vsc, vsc); + VCMPEQUB VCMPEQUB_DEPR1 + vbc __builtin_vec_vcmpequb (vuc, vuc); + VCMPEQUB VCMPEQUB_DEPR2 + +[VEC_VCMPEQUH, vec_vcmpequh, __builtin_vec_vcmpequh] + vbs __builtin_vec_vcmpequh (vss, vss); + VCMPEQUH VCMPEQUH_DEPR1 + vbs __builtin_vec_vcmpequh (vus, vus); + VCMPEQUH VCMPEQUH_DEPR2 + +[VEC_VCMPEQUW, vec_vcmpequw, __builtin_vec_vcmpequw] + vbi __builtin_vec_vcmpequw (vsi, vsi); + VCMPEQUW VCMPEQUW_DEPR1 + vbi __builtin_vec_vcmpequw (vui, vui); + VCMPEQUW VCMPEQUW_DEPR2 + +[VEC_VCMPGTFP, vec_vcmpgtfp, __builtin_vec_vcmpgtfp] + vbi __builtin_vec_vcmpgtfp (vf, vf); + VCMPGTFP VCMPGTFP_DEPR1 + +[VEC_VCMPGTSB, vec_vcmpgtsb, __builtin_vec_vcmpgtsb] + vbc __builtin_vec_vcmpgtsb (vsc, vsc); + VCMPGTSB VCMPGTSB_DEPR1 + +[VEC_VCMPGTSH, vec_vcmpgtsh, __builtin_vec_vcmpgtsh] + vbs __builtin_vec_vcmpgtsh (vss, vss); + VCMPGTSH VCMPGTSH_DEPR1 + +[VEC_VCMPGTSW, vec_vcmpgtsw, __builtin_vec_vcmpgtsw] + vbi __builtin_vec_vcmpgtsw (vsi, vsi); + VCMPGTSW VCMPGTSW_DEPR1 + +[VEC_VCMPGTUB, vec_vcmpgtub, __builtin_vec_vcmpgtub] + vbc __builtin_vec_vcmpgtub (vuc, vuc); + VCMPGTUB VCMPGTUB_DEPR1 + +[VEC_VCMPGTUH, vec_vcmpgtuh, __builtin_vec_vcmpgtuh] + vbs __builtin_vec_vcmpgtuh (vus, vus); + VCMPGTUH VCMPGTUH_DEPR1 + +[VEC_VCMPGTUW, vec_vcmpgtuw, __builtin_vec_vcmpgtuw] + vbi __builtin_vec_vcmpgtuw (vui, vui); + VCMPGTUW VCMPGTUW_DEPR1 + +[VEC_VCTZB, vec_vctzb, __builtin_vec_vctzb, _ARCH_PWR9] + vsc __builtin_vec_vctzb (vsc); + VCTZB VCTZB_DEPR1 + vuc __builtin_vec_vctzb (vuc); + VCTZB VCTZB_DEPR2 + +[VEC_VCTZD, vec_vctzd, __builtin_vec_vctzd, _ARCH_PWR9] + vsll __builtin_vec_vctzd (vsll); + VCTZD VCTZD_DEPR1 + vull __builtin_vec_vctzd (vull); + VCTZD VCTZD_DEPR2 + +[VEC_VCTZH, vec_vctzh, __builtin_vec_vctzh, _ARCH_PWR9] + vss __builtin_vec_vctzh (vss); + VCTZH VCTZH_DEPR1 + vus __builtin_vec_vctzh (vus); + VCTZH VCTZH_DEPR2 + +[VEC_VCTZW, vec_vctzw, __builtin_vec_vctzw, _ARCH_PWR9] + vsi __builtin_vec_vctzw (vsi); + VCTZW VCTZW_DEPR1 + vui __builtin_vec_vctzw (vui); + VCTZW VCTZW_DEPR2 + +[VEC_VEEDP, vec_extract_exp_dp, __builtin_vec_extract_exp_dp, _ARCH_PWR9] + vull __builtin_vec_extract_exp_dp (vd); + VEEDP VEEDP_DEPR1 + +[VEC_VEESP, vec_extract_exp_sp, __builtin_vec_extract_exp_sp, _ARCH_PWR9] + vui __builtin_vec_extract_exp_sp (vf); + VEESP VEESP_DEPR1 + +[VEC_VESDP, vec_extract_sig_dp, __builtin_vec_extract_sig_dp, _ARCH_PWR9] + vull __builtin_vec_extract_sig_dp (vd); + VESDP VESDP_DEPR1 + +[VEC_VESSP, vec_extract_sig_sp, __builtin_vec_extract_sig_sp, _ARCH_PWR9] + vui __builtin_vec_extract_sig_sp (vf); + VESSP VESSP_DEPR1 + +[VEC_VIEDP, vec_insert_exp_dp, __builtin_vec_insert_exp_dp, _ARCH_PWR9] + vd __builtin_vec_insert_exp_dp (vd, vull); + VIEDP VIEDP_DEPR1 + vd __builtin_vec_insert_exp_dp (vull, vull); + VIEDP VIEDP_DEPR2 + +[VEC_VIESP, vec_insert_exp_sp, __builtin_vec_insert_exp_sp, _ARCH_PWR9] + vf __builtin_vec_insert_exp_sp (vf, vui); + VIESP VIESP_DEPR1 + vf __builtin_vec_insert_exp_sp (vui, vui); + VIESP VIESP_DEPR2 + +[VEC_VMAXFP, vec_vmaxfp, 
__builtin_vec_vmaxfp] + vf __builtin_vec_vmaxfp (vf, vf); + VMAXFP VMAXFP_DEPR1 + +[VEC_VMAXSB, vec_vmaxsb, __builtin_vec_vmaxsb] + vsc __builtin_vec_vmaxsb (vsc, vsc); + VMAXSB VMAXSB_DEPR1 + vsc __builtin_vec_vmaxsb (vbc, vsc); + VMAXSB VMAXSB_DEPR2 + vsc __builtin_vec_vmaxsb (vsc, vbc); + VMAXSB VMAXSB_DEPR3 + +[VEC_VMAXSD, vec_vmaxsd, __builtin_vec_vmaxsd] + vsll __builtin_vec_vmaxsd (vsll, vsll); + VMAXSD VMAXSD_DEPR1 + vsll __builtin_vec_vmaxsd (vbll, vsll); + VMAXSD VMAXSD_DEPR2 + vsll __builtin_vec_vmaxsd (vsll, vbll); + VMAXSD VMAXSD_DEPR3 + +[VEC_VMAXSH, vec_vmaxsh, __builtin_vec_vmaxsh] + vss __builtin_vec_vmaxsh (vss, vss); + VMAXSH VMAXSH_DEPR1 + vss __builtin_vec_vmaxsh (vbs, vss); + VMAXSH VMAXSH_DEPR2 + vss __builtin_vec_vmaxsh (vss, vbs); + VMAXSH VMAXSH_DEPR3 + +[VEC_VMAXSW, vec_vmaxsw, __builtin_vec_vmaxsw] + vsi __builtin_vec_vmaxsw (vsi, vsi); + VMAXSW VMAXSW_DEPR1 + vsi __builtin_vec_vmaxsw (vbi, vsi); + VMAXSW VMAXSW_DEPR2 + vsi __builtin_vec_vmaxsw (vsi, vbi); + VMAXSW VMAXSW_DEPR3 + +[VEC_VMAXUB, vec_vmaxub, __builtin_vec_vmaxub] + vuc __builtin_vec_vmaxub (vsc, vuc); + VMAXUB VMAXUB_DEPR1 + vuc __builtin_vec_vmaxub (vuc, vsc); + VMAXUB VMAXUB_DEPR2 + vuc __builtin_vec_vmaxub (vuc, vuc); + VMAXUB VMAXUB_DEPR3 + vuc __builtin_vec_vmaxub (vbc, vuc); + VMAXUB VMAXUB_DEPR4 + vuc __builtin_vec_vmaxub (vuc, vbc); + VMAXUB VMAXUB_DEPR5 + +[VEC_VMAXUD, vec_vmaxud, __builtin_vec_vmaxud] + vull __builtin_vec_vmaxud (vull, vull); + VMAXUD VMAXUD_DEPR1 + vull __builtin_vec_vmaxud (vbll, vull); + VMAXUD VMAXUD_DEPR2 + vull __builtin_vec_vmaxud (vull, vbll); + VMAXUD VMAXUD_DEPR3 + +[VEC_VMAXUH, vec_vmaxuh, __builtin_vec_vmaxuh] + vus __builtin_vec_vmaxuh (vss, vus); + VMAXUH VMAXUH_DEPR1 + vus __builtin_vec_vmaxuh (vus, vss); + VMAXUH VMAXUH_DEPR2 + vus __builtin_vec_vmaxuh (vus, vus); + VMAXUH VMAXUH_DEPR3 + vus __builtin_vec_vmaxuh (vbs, vus); + VMAXUH VMAXUH_DEPR4 + vus __builtin_vec_vmaxuh (vus, vbs); + VMAXUH VMAXUH_DEPR5 + +[VEC_VMAXUW, vec_vmaxuw, __builtin_vec_vmaxuw] + vui __builtin_vec_vmaxuw (vsi, vui); + VMAXUW VMAXUW_DEPR1 + vui __builtin_vec_vmaxuw (vui, vsi); + VMAXUW VMAXUW_DEPR2 + vui __builtin_vec_vmaxuw (vui, vui); + VMAXUW VMAXUW_DEPR3 + vui __builtin_vec_vmaxuw (vbi, vui); + VMAXUW VMAXUW_DEPR4 + vui __builtin_vec_vmaxuw (vui, vbi); + VMAXUW VMAXUW_DEPR5 + +[VEC_VMINFP, vec_vminfp, __builtin_vec_vminfp] + vf __builtin_vec_vminfp (vf, vf); + VMINFP VMINFP_DEPR1 + +[VEC_VMINSB, vec_vminsb, __builtin_vec_vminsb] + vsc __builtin_vec_vminsb (vsc, vsc); + VMINSB VMINSB_DEPR1 + vsc __builtin_vec_vminsb (vbc, vsc); + VMINSB VMINSB_DEPR2 + vsc __builtin_vec_vminsb (vsc, vbc); + VMINSB VMINSB_DEPR3 + +[VEC_VMINSD, vec_vminsd, __builtin_vec_vminsd] + vsll __builtin_vec_vminsd (vsll, vsll); + VMINSD VMINSD_DEPR1 + vsll __builtin_vec_vminsd (vbll, vsll); + VMINSD VMINSD_DEPR2 + vsll __builtin_vec_vminsd (vsll, vbll); + VMINSD VMINSD_DEPR3 + +[VEC_VMINSH, vec_vminsh, __builtin_vec_vminsh] + vss __builtin_vec_vminsh (vss, vss); + VMINSH VMINSH_DEPR1 + vss __builtin_vec_vminsh (vbs, vss); + VMINSH VMINSH_DEPR2 + vss __builtin_vec_vminsh (vss, vbs); + VMINSH VMINSH_DEPR3 + +[VEC_VMINSW, vec_vminsw, __builtin_vec_vminsw] + vsi __builtin_vec_vminsw (vsi, vsi); + VMINSW VMINSW_DEPR1 + vsi __builtin_vec_vminsw (vbi, vsi); + VMINSW VMINSW_DEPR2 + vsi __builtin_vec_vminsw (vsi, vbi); + VMINSW VMINSW_DEPR3 + +[VEC_VMINUB, vec_vminub, __builtin_vec_vminub] + vuc __builtin_vec_vminub (vsc, vuc); + VMINUB VMINUB_DEPR1 + vuc __builtin_vec_vminub (vuc, vsc); + VMINUB VMINUB_DEPR2 + vuc 
__builtin_vec_vminub (vuc, vuc); + VMINUB VMINUB_DEPR3 + vuc __builtin_vec_vminub (vbc, vuc); + VMINUB VMINUB_DEPR4 + vuc __builtin_vec_vminub (vuc, vbc); + VMINUB VMINUB_DEPR5 + +[VEC_VMINUD, vec_vminud, __builtin_vec_vminud] + vull __builtin_vec_vminud (vull, vull); + VMINUD VMINUD_DEPR1 + vull __builtin_vec_vminud (vbll, vull); + VMINUD VMINUD_DEPR2 + vull __builtin_vec_vminud (vull, vbll); + VMINUD VMINUD_DEPR3 + +[VEC_VMINUH, vec_vminuh, __builtin_vec_vminuh] + vus __builtin_vec_vminuh (vss, vus); + VMINUH VMINUH_DEPR1 + vus __builtin_vec_vminuh (vus, vss); + VMINUH VMINUH_DEPR2 + vus __builtin_vec_vminuh (vus, vus); + VMINUH VMINUH_DEPR3 + vus __builtin_vec_vminuh (vbs, vus); + VMINUH VMINUH_DEPR4 + vus __builtin_vec_vminuh (vus, vbs); + VMINUH VMINUH_DEPR5 + +[VEC_VMINUW, vec_vminuw, __builtin_vec_vminuw] + vui __builtin_vec_vminuw (vsi, vui); + VMINUW VMINUW_DEPR1 + vui __builtin_vec_vminuw (vui, vsi); + VMINUW VMINUW_DEPR2 + vui __builtin_vec_vminuw (vui, vui); + VMINUW VMINUW_DEPR3 + vui __builtin_vec_vminuw (vbi, vui); + VMINUW VMINUW_DEPR4 + vui __builtin_vec_vminuw (vui, vbi); + VMINUW VMINUW_DEPR5 + +[VEC_VMRGHB, vec_vmrghb, __builtin_vec_vmrghb] + vsc __builtin_vec_vmrghb (vsc, vsc); + VMRGHB VMRGHB_DEPR1 + vuc __builtin_vec_vmrghb (vuc, vuc); + VMRGHB VMRGHB_DEPR2 + vbc __builtin_vec_vmrghb (vbc, vbc); + VMRGHB VMRGHB_DEPR3 + +[VEC_VMRGHH, vec_vmrghh, __builtin_vec_vmrghh] + vss __builtin_vec_vmrghh (vss, vss); + VMRGHH VMRGHH_DEPR1 + vus __builtin_vec_vmrghh (vus, vus); + VMRGHH VMRGHH_DEPR2 + vbs __builtin_vec_vmrghh (vbs, vbs); + VMRGHH VMRGHH_DEPR3 + vp __builtin_vec_vmrghh (vp, vp); + VMRGHH VMRGHH_DEPR4 + +[VEC_VMRGHW, vec_vmrghw, __builtin_vec_vmrghw] + vf __builtin_vec_vmrghw (vf, vf); + VMRGHW VMRGHW_DEPR1 + vsi __builtin_vec_vmrghw (vsi, vsi); + VMRGHW VMRGHW_DEPR2 + vui __builtin_vec_vmrghw (vui, vui); + VMRGHW VMRGHW_DEPR3 + vbi __builtin_vec_vmrghw (vbi, vbi); + VMRGHW VMRGHW_DEPR4 + +[VEC_VMRGLB, vec_vmrglb, __builtin_vec_vmrglb] + vsc __builtin_vec_vmrglb (vsc, vsc); + VMRGLB VMRGLB_DEPR1 + vuc __builtin_vec_vmrglb (vuc, vuc); + VMRGLB VMRGLB_DEPR2 + vbc __builtin_vec_vmrglb (vbc, vbc); + VMRGLB VMRGLB_DEPR3 + +[VEC_VMRGLH, vec_vmrglh, __builtin_vec_vmrglh] + vss __builtin_vec_vmrglh (vss, vss); + VMRGLH VMRGLH_DEPR1 + vus __builtin_vec_vmrglh (vus, vus); + VMRGLH VMRGLH_DEPR2 + vbs __builtin_vec_vmrglh (vbs, vbs); + VMRGLH VMRGLH_DEPR3 + vp __builtin_vec_vmrglh (vp, vp); + VMRGLH VMRGLH_DEPR4 + +[VEC_VMRGLW, vec_vmrglw, __builtin_vec_vmrglw] + vf __builtin_vec_vmrglw (vf, vf); + VMRGLW VMRGLW_DEPR1 + vsi __builtin_vec_vmrglw (vsi, vsi); + VMRGLW VMRGLW_DEPR2 + vui __builtin_vec_vmrglw (vui, vui); + VMRGLW VMRGLW_DEPR3 + vbi __builtin_vec_vmrglw (vbi, vbi); + VMRGLW VMRGLW_DEPR4 + +[VEC_VMSUMMBM, vec_vmsummbm, __builtin_vec_vmsummbm] + vsi __builtin_vec_vmsummbm (vsc, vuc, vsi); + VMSUMMBM VMSUMMBM_DEPR1 + +[VEC_VMSUMSHM, vec_vmsumshm, __builtin_vec_vmsumshm] + vsi __builtin_vec_vmsumshm (vss, vss, vsi); + VMSUMSHM VMSUMSHM_DEPR1 + +[VEC_VMSUMSHS, vec_vmsumshs, __builtin_vec_vmsumshs] + vsi __builtin_vec_vmsumshs (vss, vss, vsi); + VMSUMSHS VMSUMSHS_DEPR1 + +[VEC_VMSUMUBM, vec_vmsumubm, __builtin_vec_vmsumubm] + vui __builtin_vec_vmsumubm (vuc, vuc, vui); + VMSUMUBM VMSUMUBM_DEPR1 + +[VEC_VMSUMUDM, vec_vmsumudm, __builtin_vec_vmsumudm] + vuq __builtin_vec_vmsumudm (vull, vull, vuq); + VMSUMUDM VMSUMUDM_DEPR1 + +[VEC_VMSUMUHM, vec_vmsumuhm, __builtin_vec_vmsumuhm] + vui __builtin_vec_vmsumuhm (vus, vus, vui); + VMSUMUHM VMSUMUHM_DEPR1 + +[VEC_VMSUMUHS, 
vec_vmsumuhs, __builtin_vec_vmsumuhs] + vui __builtin_vec_vmsumuhs (vus, vus, vui); + VMSUMUHS VMSUMUHS_DEPR1 + +[VEC_VMULESB, vec_vmulesb, __builtin_vec_vmulesb] + vss __builtin_vec_vmulesb (vsc, vsc); + VMULESB VMULESB_DEPR1 + +[VEC_VMULESH, vec_vmulesh, __builtin_vec_vmulesh] + vsi __builtin_vec_vmulesh (vss, vss); + VMULESH VMULESH_DEPR1 + +[VEC_VMULESW, SKIP, __builtin_vec_vmulesw] + vsll __builtin_vec_vmulesw (vsi, vsi); + VMULESW VMULESW_DEPR1 + +[VEC_VMULEUB, vec_vmuleub, __builtin_vec_vmuleub] + vus __builtin_vec_vmuleub (vuc, vuc); + VMULEUB VMULEUB_DEPR1 + +[VEC_VMULEUH, vec_vmuleuh, __builtin_vec_vmuleuh] + vui __builtin_vec_vmuleuh (vus, vus); + VMULEUH VMULEUH_DEPR1 + +[VEC_VMULEUW, SKIP, __builtin_vec_vmuleuw] + vull __builtin_vec_vmuleuw (vui, vui); + VMULEUW VMULEUW_DEPR1 + +[VEC_VMULOSB, vec_vmulosb, __builtin_vec_vmulosb] + vss __builtin_vec_vmulosb (vsc, vsc); + VMULOSB VMULOSB_DEPR1 + +[VEC_VMULOSH, vec_vmulosh, __builtin_vec_vmulosh] + vsi __builtin_vec_vmulosh (vss, vss); + VMULOSH VMULOSH_DEPR1 + +[VEC_VMULOSW, SKIP, __builtin_vec_vmulosw] + vsll __builtin_vec_vmulosw (vsi, vsi); + VMULOSW VMULOSW_DEPR1 + +[VEC_VMULOUB, vec_vmuloub, __builtin_vec_vmuloub] + vus __builtin_vec_vmuloub (vuc, vuc); + VMULOUB VMULOUB_DEPR1 + +[VEC_VMULOUH, vec_vmulouh, __builtin_vec_vmulouh] + vui __builtin_vec_vmulouh (vus, vus); + VMULOUH VMULOUH_DEPR1 + +[VEC_VMULOUW, SKIP, __builtin_vec_vmulouw] + vull __builtin_vec_vmulouw (vui, vui); + VMULOUW VMULOUW_DEPR1 + +[VEC_VPKSDSS, vec_vpksdss, __builtin_vec_vpksdss, _ARCH_PWR8] + vsi __builtin_vec_vpksdss (vsll, vsll); + VPKSDSS VPKSDSS_DEPR1 + +[VEC_VPKSDUS, vec_vpksdus, __builtin_vec_vpksdus, _ARCH_PWR8] + vui __builtin_vec_vpksdus (vsll, vsll); + VPKSDUS VPKSDUS_DEPR1 + +[VEC_VPKSHSS, vec_vpkshss, __builtin_vec_vpkshss] + vsc __builtin_vec_vpkshss (vss, vss); + VPKSHSS VPKSHSS_DEPR1 + +[VEC_VPKSHUS, vec_vpkshus, __builtin_vec_vpkshus] + vuc __builtin_vec_vpkshus (vss, vss); + VPKSHUS VPKSHUS_DEPR1 + +[VEC_VPKSWSS, vec_vpkswss, __builtin_vec_vpkswss] + vss __builtin_vec_vpkswss (vsi, vsi); + VPKSWSS VPKSWSS_DEPR1 + +[VEC_VPKSWUS, vec_vpkswus, __builtin_vec_vpkswus] + vus __builtin_vec_vpkswus (vsi, vsi); + VPKSWUS VPKSWUS_DEPR1 + +[VEC_VPKUDUM, vec_vpkudum, __builtin_vec_vpkudum, _ARCH_PWR8] + vsi __builtin_vec_vpkudum (vsll, vsll); + VPKUDUM VPKUDUM_DEPR1 + vui __builtin_vec_vpkudum (vull, vull); + VPKUDUM VPKUDUM_DEPR2 + vbi __builtin_vec_vpkudum (vbll, vbll); + VPKUDUM VPKUDUM_DEPR3 + +[VEC_VPKUDUS, vec_vpkudus, __builtin_vec_vpkudus, _ARCH_PWR8] + vui __builtin_vec_vpkudus (vull, vull); + VPKUDUS VPKUDUS_DEPR1 + +[VEC_VPKUHUM, vec_vpkuhum, __builtin_vec_vpkuhum] + vsc __builtin_vec_vpkuhum (vss, vss); + VPKUHUM VPKUHUM_DEPR1 + vuc __builtin_vec_vpkuhum (vus, vus); + VPKUHUM VPKUHUM_DEPR2 + vbc __builtin_vec_vpkuhum (vbs, vbs); + VPKUHUM VPKUHUM_DEPR3 + +[VEC_VPKUHUS, vec_vpkuhus, __builtin_vec_vpkuhus] + vuc __builtin_vec_vpkuhus (vus, vus); + VPKUHUS VPKUHUS_DEPR1 + +[VEC_VPKUWUM, vec_vpkuwum, __builtin_vec_vpkuwum] + vss __builtin_vec_vpkuwum (vsi, vsi); + VPKUWUM VPKUWUM_DEPR1 + vus __builtin_vec_vpkuwum (vui, vui); + VPKUWUM VPKUWUM_DEPR2 + vbs __builtin_vec_vpkuwum (vbi, vbi); + VPKUWUM VPKUWUM_DEPR3 + +[VEC_VPKUWUS, vec_vpkuwus, __builtin_vec_vpkuwus] + vus __builtin_vec_vpkuwus (vui, vui); + VPKUWUS VPKUWUS_DEPR1 + +[VEC_VPOPCNT, vec_vpopcnt, __builtin_vec_vpopcnt, _ARCH_PWR8] + vsc __builtin_vec_vpopcnt (vsc); + VPOPCNTB VPOPCNT_DEPR1 + vuc __builtin_vec_vpopcnt (vuc); + VPOPCNTB VPOPCNT_DEPR2 + vss __builtin_vec_vpopcnt 
(vss); + VPOPCNTH VPOPCNT_DEPR3 + vus __builtin_vec_vpopcnt (vus); + VPOPCNTH VPOPCNT_DEPR4 + vsi __builtin_vec_vpopcnt (vsi); + VPOPCNTW VPOPCNT_DEPR5 + vui __builtin_vec_vpopcnt (vui); + VPOPCNTW VPOPCNT_DEPR6 + vsll __builtin_vec_vpopcnt (vsll); + VPOPCNTD VPOPCNT_DEPR7 + vull __builtin_vec_vpopcnt (vull); + VPOPCNTD VPOPCNT_DEPR8 + +[VEC_VPOPCNTB, vec_vpopcntb, __builtin_vec_vpopcntb, _ARCH_PWR8] + vsc __builtin_vec_vpopcntb (vsc); + VPOPCNTB VPOPCNTB_DEPR1 + vuc __builtin_vec_vpopcntb (vuc); + VPOPCNTB VPOPCNTB_DEPR2 + +[VEC_VPOPCNTD, vec_vpopcntd, __builtin_vec_vpopcntd, _ARCH_PWR8] + vsll __builtin_vec_vpopcntd (vsll); + VPOPCNTD VPOPCNTD_DEPR1 + vull __builtin_vec_vpopcntd (vull); + VPOPCNTD VPOPCNTD_DEPR2 + +[VEC_VPOPCNTH, vec_vpopcnth, __builtin_vec_vpopcnth, _ARCH_PWR8] + vss __builtin_vec_vpopcnth (vss); + VPOPCNTH VPOPCNTH_DEPR1 + vus __builtin_vec_vpopcnth (vus); + VPOPCNTH VPOPCNTH_DEPR2 + +[VEC_VPOPCNTW, vec_vpopcntw, __builtin_vec_vpopcntw, _ARCH_PWR8] + vsi __builtin_vec_vpopcntw (vsi); + VPOPCNTW VPOPCNTW_DEPR1 + vui __builtin_vec_vpopcntw (vui); + VPOPCNTW VPOPCNTW_DEPR2 + +[VEC_VPRTYBD, vec_vprtybd, __builtin_vec_vprtybd, _ARCH_PWR9] + vsll __builtin_vec_vprtybd (vsll); + VPRTYBD VPRTYBD_DEPR1 + vull __builtin_vec_vprtybd (vull); + VPRTYBD VPRTYBD_DEPR2 + +[VEC_VPRTYBQ, vec_vprtybq, __builtin_vec_vprtybq, _ARCH_PPC64_PWR9] + vsq __builtin_vec_vprtybq (vsq); + VPRTYBQ VPRTYBQ_DEPR1 + vuq __builtin_vec_vprtybq (vuq); + VPRTYBQ VPRTYBQ_DEPR2 + signed __int128 __builtin_vec_vprtybq (signed __int128); + VPRTYBQ VPRTYBQ_DEPR3 + unsigned __int128 __builtin_vec_vprtybq (unsigned __int128); + VPRTYBQ VPRTYBQ_DEPR4 + +[VEC_VPRTYBW, vec_vprtybw, __builtin_vec_vprtybw, _ARCH_PWR9] + vsi __builtin_vec_vprtybw (vsi); + VPRTYBW VPRTYBW_DEPR1 + vui __builtin_vec_vprtybw (vui); + VPRTYBW VPRTYBW_DEPR2 + +[VEC_VRLB, vec_vrlb, __builtin_vec_vrlb] + vsc __builtin_vec_vrlb (vsc, vuc); + VRLB VRLB_DEPR1 + vuc __builtin_vec_vrlb (vuc, vuc); + VRLB VRLB_DEPR2 + +[VEC_VRLD, SKIP, __builtin_vec_vrld, _ARCH_PWR8] + vsll __builtin_vec_vrld (vsll, vull); + VRLD VRLD_DEPR1 + vull __builtin_vec_vrld (vull, vull); + VRLD VRLD_DEPR2 + +[VEC_VRLH, vec_vrlh, __builtin_vec_vrlh] + vss __builtin_vec_vrlh (vss, vus); + VRLH VRLH_DEPR1 + vus __builtin_vec_vrlh (vus, vus); + VRLH VRLH_DEPR2 + +[VEC_VRLW, vec_vrlw, __builtin_vec_vrlw] + vsi __builtin_vec_vrlw (vsi, vui); + VRLW VRLW_DEPR1 + vui __builtin_vec_vrlw (vui, vui); + VRLW VRLW_DEPR2 + +[VEC_VSLB, vec_vslb, __builtin_vec_vslb] + vsc __builtin_vec_vslb (vsc, vuc); + VSLB VSLB_DEPR1 + vuc __builtin_vec_vslb (vuc, vuc); + VSLB VSLB_DEPR2 + +[VEC_VSLD, SKIP, __builtin_vec_vsld, _ARCH_PWR8] + vsll __builtin_vec_vsld (vsll, vull); + VSLD VSLD_DEPR1 + vull __builtin_vec_vsld (vull, vull); + VSLD VSLD_DEPR2 + +[VEC_VSLH, vec_vslh, __builtin_vec_vslh] + vss __builtin_vec_vslh (vss, vus); + VSLH VSLH_DEPR1 + vus __builtin_vec_vslh (vus, vus); + VSLH VSLH_DEPR2 + +[VEC_VSLW, vec_vslw, __builtin_vec_vslw] + vsi __builtin_vec_vslw (vsi, vui); + VSLW VSLW_DEPR1 + vui __builtin_vec_vslw (vui, vui); + VSLW VSLW_DEPR2 + +[VEC_VSPLTB, vec_vspltb, __builtin_vec_vspltb] + vsc __builtin_vec_vspltb (vsc, const int); + VSPLTB VSPLTB_DEPR1 + vuc __builtin_vec_vspltb (vuc, const int); + VSPLTB VSPLTB_DEPR2 + vbc __builtin_vec_vspltb (vbc, const int); + VSPLTB VSPLTB_DEPR3 + +[VEC_VSPLTH, vec_vsplth, __builtin_vec_vsplth] + vss __builtin_vec_vsplth (vss, const int); + VSPLTH VSPLTH_DEPR1 + vus __builtin_vec_vsplth (vus, const int); + VSPLTH VSPLTH_DEPR2 + vbs 
__builtin_vec_vsplth (vbs, const int); + VSPLTH VSPLTH_DEPR3 + vp __builtin_vec_vsplth (vp, const int); + VSPLTH VSPLTH_DEPR4 + +[VEC_VSPLTW, vec_vspltw, __builtin_vec_vspltw] + vsi __builtin_vec_vspltw (vsi, const int); + VSPLTW VSPLTW_DEPR1 + vui __builtin_vec_vspltw (vui, const int); + VSPLTW VSPLTW_DEPR2 + vbi __builtin_vec_vspltw (vbi, const int); + VSPLTW VSPLTW_DEPR3 + vf __builtin_vec_vspltw (vf, const int); + VSPLTW VSPLTW_DEPR4 + +[VEC_VSRAB, vec_vsrab, __builtin_vec_vsrab] + vsc __builtin_vec_vsrab (vsc, vuc); + VSRAB VSRAB_DEPR1 + vuc __builtin_vec_vsrab (vuc, vuc); + VSRAB VSRAB_DEPR2 + +[VEC_VSRAD, SKIP, __builtin_vec_vsrad, _ARCH_PWR8] + vsll __builtin_vec_vsrad (vsll, vull); + VSRAD VSRAD_DEPR1 + vull __builtin_vec_vsrad (vull, vull); + VSRAD VSRAD_DEPR2 + +[VEC_VSRAH, vec_vsrah, __builtin_vec_vsrah] + vss __builtin_vec_vsrah (vss, vus); + VSRAH VSRAH_DEPR1 + vus __builtin_vec_vsrah (vus, vus); + VSRAH VSRAH_DEPR2 + +[VEC_VSRAW, vec_vsraw, __builtin_vec_vsraw] + vsi __builtin_vec_vsraw (vsi, vui); + VSRAW VSRAW_DEPR1 + vui __builtin_vec_vsraw (vui, vui); + VSRAW VSRAW_DEPR2 + +[VEC_VSRB, vec_vsrb, __builtin_vec_vsrb] + vsc __builtin_vec_vsrb (vsc, vuc); + VSRB VSRB_DEPR1 + vuc __builtin_vec_vsrb (vuc, vuc); + VSRB VSRB_DEPR2 + +[VEC_VSRD, SKIP, __builtin_vec_vsrd, _ARCH_PWR8] + vsll __builtin_vec_vsrd (vsll, vull); + VSRD VSRD_DEPR1 + vull __builtin_vec_vsrd (vull, vull); + VSRD VSRD_DEPR2 + +[VEC_VSRH, vec_vsrh, __builtin_vec_vsrh] + vss __builtin_vec_vsrh (vss, vus); + VSRH VSRH_DEPR1 + vus __builtin_vec_vsrh (vus, vus); + VSRH VSRH_DEPR2 + +[VEC_VSRW, vec_vsrw, __builtin_vec_vsrw] + vsi __builtin_vec_vsrw (vsi, vui); + VSRW VSRW_DEPR1 + vui __builtin_vec_vsrw (vui, vui); + VSRW VSRW_DEPR2 + +[VEC_VSTDCDP, scalar_test_data_class_dp, __builtin_vec_scalar_test_data_class_dp, _ARCH_PWR9] + unsigned int __builtin_vec_scalar_test_data_class_dp (double, const int); + VSTDCDP VSTDCDP_DEPR1 + +[VEC_VSTDCNDP, scalar_test_neg_dp, __builtin_vec_scalar_test_neg_dp, _ARCH_PWR9] + unsigned int __builtin_vec_scalar_test_neg_dp (double); + VSTDCNDP VSTDCNDP_DEPR1 + +[VEC_VSTDCNQP, scalar_test_neg_qp, __builtin_vec_scalar_test_neg_qp, _ARCH_PWR9] + unsigned int __builtin_vec_scalar_test_neg_qp (_Float128); + VSTDCNQP VSTDCNQP_DEPR1 + +[VEC_VSTDCNSP, scalar_test_neg_sp, __builtin_vec_scalar_test_neg_sp, _ARCH_PWR9] + unsigned int __builtin_vec_scalar_test_neg_sp (float); + VSTDCNSP VSTDCNSP_DEPR1 + +[VEC_VSTDCQP, scalar_test_data_class_qp, __builtin_vec_scalar_test_data_class_qp, _ARCH_PWR9] + unsigned int __builtin_vec_scalar_test_data_class_qp (_Float128, const int); + VSTDCQP VSTDCQP_DEPR1 + +[VEC_VSTDCSP, scalar_test_data_class_sp, __builtin_vec_scalar_test_data_class_sp, _ARCH_PWR9] + unsigned int __builtin_vec_scalar_test_data_class_sp (float, const int); + VSTDCSP VSTDCSP_DEPR1 + +[VEC_VSUBCUQ, vec_vsubcuq, __builtin_vec_vsubcuq] + vsq __builtin_vec_vsubcuq (vsq, vsq); + VSUBCUQ VSUBCUQ_DEPR1 + vuq __builtin_vec_vsubcuq (vuq, vuq); + VSUBCUQ VSUBCUQ_DEPR2 + +[VEC_VSUBECUQ, vec_vsubecuq, __builtin_vec_vsubecuq, _ARCH_PWR8] + vsq __builtin_vec_vsubecuq (vsq, vsq, vsq); + VSUBECUQ VSUBECUQ_DEPR1 + vuq __builtin_vec_vsubecuq (vuq, vuq, vuq); + VSUBECUQ VSUBECUQ_DEPR2 + +[VEC_VSUBEUQM, vec_vsubeuqm, __builtin_vec_vsubeuqm, _ARCH_PWR8] + vsq __builtin_vec_vsubeuqm (vsq, vsq, vsq); + VSUBEUQM VSUBEUQM_DEPR1 + vuq __builtin_vec_vsubeuqm (vuq, vuq, vuq); + VSUBEUQM VSUBEUQM_DEPR2 + +[VEC_VSUBFP, vec_vsubfp, __builtin_vec_vsubfp] + vf __builtin_vec_vsubfp (vf, vf); + VSUBFP VSUBFP_DEPR1 +
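
Similarly, a minimal sketch for the deprecated VSUBFP spelling just above (assumptions as before: <altivec.h> with VMX enabled; the generic vec_sub overload from the VEC_SUB stanza earlier in this file is the supported form):

#include <altivec.h>

/* Single-precision vector subtract.  Both spellings resolve to the
   VSUBFP instance builtin; vec_vsubfp survives only for backward
   compatibility with old AltiVec code.  */
vector float
sub_v4sf (vector float a, vector float b)
{
  vector float deprecated_form = vec_vsubfp (a, b);  /* deprecated */
  vector float generic_form = vec_sub (a, b);        /* preferred */
  /* The two results are identical, so this returns a zero vector.  */
  return vec_sub (deprecated_form, generic_form);
}
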
+[VEC_VSUBSBS, vec_vsubsbs, __builtin_vec_vsubsbs] + vsc __builtin_vec_vsubsbs (vsc, vsc); + VSUBSBS VSUBSBS_DEPR1 + vsc __builtin_vec_vsubsbs (vbc, vsc); + VSUBSBS VSUBSBS_DEPR2 + vsc __builtin_vec_vsubsbs (vsc, vbc); + VSUBSBS VSUBSBS_DEPR3 + +[VEC_VSUBSHS, vec_vsubshs, __builtin_vec_vsubshs] + vss __builtin_vec_vsubshs (vss, vss); + VSUBSHS VSUBSHS_DEPR1 + vss __builtin_vec_vsubshs (vbs, vss); + VSUBSHS VSUBSHS_DEPR2 + vss __builtin_vec_vsubshs (vss, vbs); + VSUBSHS VSUBSHS_DEPR3 + +[VEC_VSUBSWS, vec_vsubsws, __builtin_vec_vsubsws] + vsi __builtin_vec_vsubsws (vsi, vsi); + VSUBSWS VSUBSWS_DEPR1 + vsi __builtin_vec_vsubsws (vbi, vsi); + VSUBSWS VSUBSWS_DEPR2 + vsi __builtin_vec_vsubsws (vsi, vbi); + VSUBSWS VSUBSWS_DEPR3 + +[VEC_VSUBUBM, vec_vsububm, __builtin_vec_vsububm] + vsc __builtin_vec_vsububm (vsc, vsc); + VSUBUBM VSUBUBM_DEPR1 + vuc __builtin_vec_vsububm (vsc, vuc); + VSUBUBM VSUBUBM_DEPR2 + vuc __builtin_vec_vsububm (vuc, vsc); + VSUBUBM VSUBUBM_DEPR3 + vuc __builtin_vec_vsububm (vuc, vuc); + VSUBUBM VSUBUBM_DEPR4 + vsc __builtin_vec_vsububm (vbc, vsc); + VSUBUBM VSUBUBM_DEPR5 + vsc __builtin_vec_vsububm (vsc, vbc); + VSUBUBM VSUBUBM_DEPR6 + vuc __builtin_vec_vsububm (vbc, vuc); + VSUBUBM VSUBUBM_DEPR7 + vuc __builtin_vec_vsububm (vuc, vbc); + VSUBUBM VSUBUBM_DEPR8 + +[VEC_VSUBUBS, vec_vsububs, __builtin_vec_vsububs] + vsc __builtin_vec_vsububs (vsc, vsc); + VSUBUBS VSUBUBS_DEPR1 + vsc __builtin_vec_vsububs (vbc, vsc); + VSUBUBS VSUBUBS_DEPR2 + vsc __builtin_vec_vsububs (vsc, vbc); + VSUBUBS VSUBUBS_DEPR3 + vuc __builtin_vec_vsububs (vsc, vuc); + VSUBUBS VSUBUBS_DEPR4 + vuc __builtin_vec_vsububs (vuc, vsc); + VSUBUBS VSUBUBS_DEPR5 + vuc __builtin_vec_vsububs (vuc, vuc); + VSUBUBS VSUBUBS_DEPR6 + vuc __builtin_vec_vsububs (vbc, vuc); + VSUBUBS VSUBUBS_DEPR7 + vuc __builtin_vec_vsububs (vuc, vbc); + VSUBUBS VSUBUBS_DEPR8 + +[VEC_VSUBUDM, vec_vsubudm, __builtin_vec_vsubudm, _ARCH_PWR8] + vsll __builtin_vec_vsubudm (vbll, vsll); + VSUBUDM VSUBUDM_DEPR1 + vsll __builtin_vec_vsubudm (vsll, vbll); + VSUBUDM VSUBUDM_DEPR2 + vsll __builtin_vec_vsubudm (vsll, vsll); + VSUBUDM VSUBUDM_DEPR3 + vull __builtin_vec_vsubudm (vbll, vull); + VSUBUDM VSUBUDM_DEPR4 + vull __builtin_vec_vsubudm (vull, vbll); + VSUBUDM VSUBUDM_DEPR5 + vull __builtin_vec_vsubudm (vull, vull); + VSUBUDM VSUBUDM_DEPR6 + +[VEC_VSUBUHM, vec_vsubuhm, __builtin_vec_vsubuhm] + vss __builtin_vec_vsubuhm (vss, vss); + VSUBUHM VSUBUHM_DEPR1 + vus __builtin_vec_vsubuhm (vss, vus); + VSUBUHM VSUBUHM_DEPR2 + vus __builtin_vec_vsubuhm (vus, vss); + VSUBUHM VSUBUHM_DEPR3 + vus __builtin_vec_vsubuhm (vus, vus); + VSUBUHM VSUBUHM_DEPR4 + vss __builtin_vec_vsubuhm (vbs, vss); + VSUBUHM VSUBUHM_DEPR5 + vss __builtin_vec_vsubuhm (vss, vbs); + VSUBUHM VSUBUHM_DEPR6 + vus __builtin_vec_vsubuhm (vbs, vus); + VSUBUHM VSUBUHM_DEPR7 + vus __builtin_vec_vsubuhm (vus, vbs); + VSUBUHM VSUBUHM_DEPR8 + +[VEC_VSUBUHS, vec_vsubuhs, __builtin_vec_vsubuhs] + vus __builtin_vec_vsubuhs (vss, vus); + VSUBUHS VSUBUHS_DEPR1 + vus __builtin_vec_vsubuhs (vus, vss); + VSUBUHS VSUBUHS_DEPR2 + vus __builtin_vec_vsubuhs (vus, vus); + VSUBUHS VSUBUHS_DEPR3 + vus __builtin_vec_vsubuhs (vbs, vus); + VSUBUHS VSUBUHS_DEPR4 + vus __builtin_vec_vsubuhs (vus, vbs); + VSUBUHS VSUBUHS_DEPR5 + +[VEC_VSUBUQM, vec_vsubuqm, __builtin_vec_vsubuqm, _ARCH_PWR8] + vsq __builtin_vec_vsubuqm (vsq, vsq); + VSUBUQM VSUBUQM_DEPR1 + vuq __builtin_vec_vsubuqm (vuq, vuq); + VSUBUQM VSUBUQM_DEPR2 + +[VEC_VSUBUWM, vec_vsubuwm, __builtin_vec_vsubuwm] + vsi __builtin_vec_vsubuwm (vbi, vsi); 
+ VSUBUWM VSUBUWM_DEPR1 + vsi __builtin_vec_vsubuwm (vsi, vbi); + VSUBUWM VSUBUWM_DEPR2 + vui __builtin_vec_vsubuwm (vbi, vui); + VSUBUWM VSUBUWM_DEPR3 + vui __builtin_vec_vsubuwm (vui, vbi); + VSUBUWM VSUBUWM_DEPR4 + vsi __builtin_vec_vsubuwm (vsi, vsi); + VSUBUWM VSUBUWM_DEPR5 + vui __builtin_vec_vsubuwm (vsi, vui); + VSUBUWM VSUBUWM_DEPR6 + vui __builtin_vec_vsubuwm (vui, vsi); + VSUBUWM VSUBUWM_DEPR7 + vui __builtin_vec_vsubuwm (vui, vui); + VSUBUWM VSUBUWM_DEPR8 + +[VEC_VSUBUWS, vec_vsubuws, __builtin_vec_vsubuws] + vui __builtin_vec_vsubuws (vsi, vui); + VSUBUWS VSUBUWS_DEPR1 + vui __builtin_vec_vsubuws (vui, vsi); + VSUBUWS VSUBUWS_DEPR2 + vui __builtin_vec_vsubuws (vui, vui); + VSUBUWS VSUBUWS_DEPR3 + vui __builtin_vec_vsubuws (vbi, vui); + VSUBUWS VSUBUWS_DEPR4 + vui __builtin_vec_vsubuws (vui, vbi); + VSUBUWS VSUBUWS_DEPR5 + +[VEC_VSUM4SBS, vec_vsum4sbs, __builtin_vec_vsum4sbs] + vsi __builtin_vec_vsum4sbs (vsc, vsi); + VSUM4SBS VSUM4SBS_DEPR1 + +[VEC_VSUM4SHS, vec_vsum4shs, __builtin_vec_vsum4shs] + vsi __builtin_vec_vsum4shs (vss, vsi); + VSUM4SHS VSUM4SHS_DEPR1 + +[VEC_VSUM4UBS, vec_vsum4ubs, __builtin_vec_vsum4ubs] + vui __builtin_vec_vsum4ubs (vuc, vui); + VSUM4UBS VSUM4UBS_DEPR1 + +[VEC_VTDCDP, vec_test_data_class_dp, __builtin_vec_test_data_class_dp, _ARCH_PWR9] + vbll __builtin_vec_test_data_class_dp (vd, const int); + VTDCDP VTDCDP_DEPR1 + +[VEC_VTDCSP, vec_test_data_class_sp, __builtin_vec_test_data_class_sp, _ARCH_PWR9] + vbi __builtin_vec_test_data_class_sp (vf, const int); + VTDCSP VTDCSP_DEPR1 + +[VEC_UNS_DOUBLEE, vec_uns_doublee, __builtin_vec_uns_doublee] + vd __builtin_vec_uns_doublee (vui); + UNS_DOUBLEE_V4SI UNS_DOUBLEE_DEPR1 + +[VEC_UNS_DOUBLEH, vec_uns_doubleh, __builtin_vec_uns_doubleh] + vd __builtin_vec_uns_doubleh (vui); + UNS_DOUBLEH_V4SI UNS_DOUBLEH_DEPR1 + +[VEC_UNS_DOUBLEL, vec_uns_doublel, __builtin_vec_uns_doublel] + vd __builtin_vec_uns_doublel (vui); + UNS_DOUBLEL_V4SI UNS_DOUBLEL_DEPR1 + +[VEC_UNS_DOUBLEO, vec_uns_doubleo, __builtin_vec_uns_doubleo] + vd __builtin_vec_uns_doubleo (vui); + UNS_DOUBLEO_V4SI UNS_DOUBLEO_DEPR1 + +[VEC_VUPKHPX, vec_vupkhpx, __builtin_vec_vupkhpx] + vui __builtin_vec_vupkhpx (vus); + VUPKHPX VUPKHPX_DEPR1 + vui __builtin_vec_vupkhpx (vp); + VUPKHPX VUPKHPX_DEPR2 + +[VEC_VUPKHSB, vec_vupkhsb, __builtin_vec_vupkhsb] + vss __builtin_vec_vupkhsb (vsc); + VUPKHSB VUPKHSB_DEPR1 + vbs __builtin_vec_vupkhsb (vbc); + VUPKHSB VUPKHSB_DEPR2 + +[VEC_VUPKHSH, vec_vupkhsh, __builtin_vec_vupkhsh] + vsi __builtin_vec_vupkhsh (vss); + VUPKHSH VUPKHSH_DEPR1 + vbi __builtin_vec_vupkhsh (vbs); + VUPKHSH VUPKHSH_DEPR2 + +[VEC_VUPKHSW, vec_vupkhsw, __builtin_vec_vupkhsw, _ARCH_PWR8] + vsll __builtin_vec_vupkhsw (vsi); + VUPKHSW VUPKHSW_DEPR1 + vbll __builtin_vec_vupkhsw (vbi); + VUPKHSW VUPKHSW_DEPR2 + +[VEC_VUPKLPX, vec_vupklpx, __builtin_vec_vupklpx] + vui __builtin_vec_vupklpx (vus); + VUPKLPX VUPKLPX_DEPR1 + vui __builtin_vec_vupklpx (vp); + VUPKLPX VUPKLPX_DEPR2 + +[VEC_VUPKLSB, vec_vupklsb, __builtin_vec_vupklsb] + vss __builtin_vec_vupklsb (vsc); + VUPKLSB VUPKLSB_DEPR1 + vbs __builtin_vec_vupklsb (vbc); + VUPKLSB VUPKLSB_DEPR2 + +[VEC_VUPKLSH, vec_vupklsh, __builtin_vec_vupklsh] + vsi __builtin_vec_vupklsh (vss); + VUPKLSH VUPKLSH_DEPR1 + vbi __builtin_vec_vupklsh (vbs); + VUPKLSH VUPKLSH_DEPR2 + +[VEC_VUPKLSW, vec_vupklsw, __builtin_vec_vupklsw, _ARCH_PWR8] + vsll __builtin_vec_vupklsw (vsi); + VUPKLSW VUPKLSW_DEPR1 + vbll __builtin_vec_vupklsw (vbi); + VUPKLSW VUPKLSW_DEPR2 diff --git a/gcc/config/rs6000/rs6000.c 
b/gcc/config/rs6000/rs6000.c index e073b26..d02c1b6 100644 --- a/gcc/config/rs6000/rs6000.c +++ b/gcc/config/rs6000/rs6000.c @@ -5793,6 +5793,59 @@ rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out, default: break; } + + machine_mode in_vmode = TYPE_MODE (type_in); + machine_mode out_vmode = TYPE_MODE (type_out); + + /* Power10 supported vectorized built-in functions. */ + if (TARGET_POWER10 + && in_vmode == out_vmode + && VECTOR_UNIT_ALTIVEC_OR_VSX_P (in_vmode)) + { + machine_mode exp_mode = DImode; + machine_mode exp_vmode = V2DImode; + enum rs6000_builtins bif; + switch (fn) + { + case MISC_BUILTIN_DIVWE: + case MISC_BUILTIN_DIVWEU: + exp_mode = SImode; + exp_vmode = V4SImode; + if (fn == MISC_BUILTIN_DIVWE) + bif = P10V_BUILTIN_DIVES_V4SI; + else + bif = P10V_BUILTIN_DIVEU_V4SI; + break; + case MISC_BUILTIN_DIVDE: + case MISC_BUILTIN_DIVDEU: + if (fn == MISC_BUILTIN_DIVDE) + bif = P10V_BUILTIN_DIVES_V2DI; + else + bif = P10V_BUILTIN_DIVEU_V2DI; + break; + case P10_BUILTIN_CFUGED: + bif = P10V_BUILTIN_VCFUGED; + break; + case P10_BUILTIN_CNTLZDM: + bif = P10V_BUILTIN_VCLZDM; + break; + case P10_BUILTIN_CNTTZDM: + bif = P10V_BUILTIN_VCTZDM; + break; + case P10_BUILTIN_PDEPD: + bif = P10V_BUILTIN_VPDEPD; + break; + case P10_BUILTIN_PEXTD: + bif = P10V_BUILTIN_VPEXTD; + break; + default: + return NULL_TREE; + } + + if (in_mode == exp_mode && in_vmode == exp_vmode) + return rs6000_builtin_decls[bif]; + } + return NULL_TREE; } @@ -7955,7 +8008,7 @@ rs6000_slow_unaligned_access (machine_mode mode, unsigned int align) unsigned int rs6000_special_adjust_field_align (tree type, unsigned int computed) { - if (computed <= 32) + if (computed <= 32 || TYPE_PACKED (type)) return computed; /* Strip initial arrays. */ diff --git a/gcc/ipa-modref-tree.c b/gcc/ipa-modref-tree.c index 69395b0..8d147a1 100644 --- a/gcc/ipa-modref-tree.c +++ b/gcc/ipa-modref-tree.c @@ -101,7 +101,7 @@ test_insert_search_collapse () ASSERT_TRUE (base_node->every_ref); /* Insert base to trigger base list collapse. */ - t->insert (5, 6, a, false); + t->insert (5, 0, a, false); ASSERT_TRUE (t->every_base); ASSERT_EQ (t->bases, NULL); ASSERT_EQ (t->search (1), NULL); diff --git a/gcc/ipa-modref-tree.h b/gcc/ipa-modref-tree.h index 6f6932f..a86e684 100644 --- a/gcc/ipa-modref-tree.h +++ b/gcc/ipa-modref-tree.h @@ -322,6 +322,20 @@ struct GTY((user)) modref_ref_node every_access = true; } + /* Verify that list does not contain redundant accesses. */ + void verify () + { + size_t i, i2; + modref_access_node *a, *a2; + + FOR_EACH_VEC_SAFE_ELT (accesses, i, a) + { + FOR_EACH_VEC_SAFE_ELT (accesses, i2, a2) + if (i != i2) + gcc_assert (!a->contains (*a2)); + } + } + /* Insert access with OFFSET and SIZE. Collapse tree if it has more than MAX_ACCESSES entries. If RECORD_ADJUSTMENTs is true avoid too many interval extensions. 
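An aside on the new verify () member above: the invariant it asserts is easiest to see with plain half-open ranges standing in for modref_access_node. The struct and helper below are illustrative stand-ins, not GCC's types:

  struct range { long lo, hi; };	/* half-open [lo, hi) */
  static bool
  range_contains (struct range a, struct range b)
  {
    return a.lo <= b.lo && b.hi <= a.hi;
  }
  /* The access list must stay redundancy-free: for all i != j,
     !range_contains (r[i], r[j]).  A list such as {[0,16), [4,8)}
     would trip the assert, because insert/try_merge_with should
     already have dropped the contained [4,8).  */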
@@ -337,6 +351,9 @@ struct GTY((user)) modref_ref_node size_t i; modref_access_node *a2; + if (flag_checking) + verify (); + if (!a.useful_p ()) { if (!every_access) @@ -388,18 +405,35 @@ private: void try_merge_with (size_t index) { - modref_access_node *a2; size_t i; - FOR_EACH_VEC_SAFE_ELT (accesses, i, a2) + for (i = 0; i < accesses->length ();) if (i != index) - if ((*accesses)[index].contains (*a2) - || (*accesses)[index].merge (*a2, false)) { - if (index == accesses->length () - 1) - index = i; - accesses->unordered_remove (i); + bool found = false, restart = false; + modref_access_node *a = &(*accesses)[i]; + modref_access_node *n = &(*accesses)[index]; + + if (n->contains (*a)) + found = true; + if (!found && n->merge (*a, false)) + found = restart = true; + if (found) + { + accesses->unordered_remove (i); + if (index == accesses->length ()) + { + index = i; + i++; + } + if (restart) + i = 0; + } + else + i++; } + else + i++; } }; @@ -444,18 +478,23 @@ struct GTY((user)) modref_base_node if (ref_node) return ref_node; - if (changed) - *changed = true; - - /* Collapse the node if too full already. */ - if (refs && refs->length () >= max_refs) + /* We always allow inserting ref 0. For non-0 refs there is an upper + limit on the number of entries; if it is exceeded, + drop the ref conservatively to 0. */ + if (ref && refs && refs->length () >= max_refs) { if (dump_file) - fprintf (dump_file, "--param param=modref-max-refs limit reached\n"); - collapse (); - return NULL; + fprintf (dump_file, "--param param=modref-max-refs limit reached;" + " using 0\n"); + ref = 0; + ref_node = search (ref); + if (ref_node) + return ref_node; } + if (changed) + *changed = true; + ref_node = new (ggc_alloc <modref_ref_node <T> > ())modref_ref_node <T> (ref); vec_safe_push (refs, ref_node); @@ -513,9 +552,10 @@ struct GTY((user)) modref_tree /* Insert BASE; collapse tree if there are more than MAX_REFS. Return inserted base and if CHANGED is non-null set it to true if - something changed. */ + something changed. + If the table gets full, try to insert REF instead. */ - modref_base_node <T> *insert_base (T base, bool *changed = NULL) + modref_base_node <T> *insert_base (T base, T ref, bool *changed = NULL) { modref_base_node <T> *base_node; @@ -528,18 +568,31 @@ struct GTY((user)) modref_tree if (base_node) return base_node; - if (changed) - *changed = true; - - /* Collapse the node if too full already. */ - if (bases && bases->length () >= max_bases) + /* We always allow inserting base 0. For a non-0 base there is an upper + limit on the number of entries; if it is exceeded, + drop the base conservatively to the ref, and if that still does not fit, to 0. 
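+ In effect the fallback ladder is: the exact base, then the access REF, then alias set 0; each step only makes the recorded summary more conservative. 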
*/ + if (base && bases && bases->length () >= max_bases) { + base_node = search (ref); + if (base_node) + { + if (dump_file) + fprintf (dump_file, "--param param=modref-max-bases" + " limit reached; using ref\n"); + return base_node; + } if (dump_file) - fprintf (dump_file, "--param param=modref-max-bases limit reached\n"); - collapse (); - return NULL; + fprintf (dump_file, "--param param=modref-max-bases" + " limit reached; using 0\n"); + base = 0; + base_node = search (base); + if (base_node) + return base_node; } + if (changed) + *changed = true; + base_node = new (ggc_alloc <modref_base_node <T> > ()) modref_base_node <T> (base); vec_safe_push (bases, base_node); @@ -563,8 +616,15 @@ struct GTY((user)) modref_tree return true; } - modref_base_node <T> *base_node = insert_base (base, &changed); - if (!base_node || base_node->every_ref) + modref_base_node <T> *base_node = insert_base (base, ref, &changed); + base = base_node->base; + /* If the table got full we may end up with a useless base. */ + if (!base && !ref && !a.useful_p ()) + { + collapse (); + return true; + } + if (base_node->every_ref) return changed; gcc_checking_assert (search (base) != NULL); @@ -577,40 +637,26 @@ struct GTY((user)) modref_tree modref_ref_node <T> *ref_node = base_node->insert_ref (ref, max_refs, &changed); + ref = ref_node->ref; - /* If we failed to insert ref, just see if there is a cleanup possible. */ - if (!ref_node) + if (ref_node->every_access) + return changed; + changed |= ref_node->insert_access (a, max_accesses, + record_adjustments); + /* See if we failed to add useful access. */ + if (ref_node->every_access) { - /* No useful ref information and no useful base; collapse everything. */ - if (!base && base_node->every_ref) + /* Collapse everything if there is no useful base and ref. */ + if (!base && !ref) { collapse (); gcc_checking_assert (changed); } - else if (changed) - cleanup (); - } - else - { - if (ref_node->every_access) - return changed; - changed |= ref_node->insert_access (a, max_accesses, - record_adjustments); - /* See if we failed to add useful access. */ - if (ref_node->every_access) + /* Collapse base if there is no useful ref. */ + else if (!ref) { - /* Collapse everything if there is no useful base and ref. */ - if (!base && !ref) - { - collapse (); - gcc_checking_assert (changed); - } - /* Collapse base if there is no useful ref. */ - else if (!ref) - { - base_node->collapse (); - gcc_checking_assert (changed); - } + base_node->collapse (); + gcc_checking_assert (changed); } } return changed; @@ -695,7 +741,7 @@ struct GTY((user)) modref_tree { if (base_node->every_ref) { - my_base_node = insert_base (base_node->base, &changed); + my_base_node = insert_base (base_node->base, 0, &changed); if (my_base_node && !my_base_node->every_ref) { my_base_node->collapse (); diff --git a/gcc/ipa-modref.c b/gcc/ipa-modref.c index 0d5ab9c..6e7788e 100644 --- a/gcc/ipa-modref.c +++ b/gcc/ipa-modref.c @@ -2445,9 +2445,9 @@ read_modref_records (lto_input_block *ib, struct data_in *data_in, if (nolto_ret) nolto_base_node = (*nolto_ret)->insert_base (base_tree ? 
get_alias_set (base_tree) - : 0); + : 0, 0); if (lto_ret) - lto_base_node = (*lto_ret)->insert_base (base_tree); + lto_base_node = (*lto_ret)->insert_base (base_tree, 0); size_t every_ref = streamer_read_uhwi (ib); size_t nref = streamer_read_uhwi (ib); diff --git a/gcc/match.pd b/gcc/match.pd index e5bbb12..f421c74 100644 --- a/gcc/match.pd +++ b/gcc/match.pd @@ -3390,7 +3390,6 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT) (simplify (convert (lshift:s@0 (convert:s@1 @2) INTEGER_CST@3)) (if (INTEGRAL_TYPE_P (type) - && !POINTER_TYPE_P (type) && tree_nop_conversion_p (type, TREE_TYPE (@0)) && INTEGRAL_TYPE_P (TREE_TYPE (@2)) && TYPE_PRECISION (TREE_TYPE (@2)) <= TYPE_PRECISION (type)) diff --git a/gcc/params.opt b/gcc/params.opt index cec43d2..3a701e2 100644 --- a/gcc/params.opt +++ b/gcc/params.opt @@ -1015,7 +1015,7 @@ Maximum number of escape points tracked by modref per SSA-name. -param=modref-max-adjustments= Common Joined UInteger Var(param_modref_max_adjustments) Init(8) IntegerRange (0, 254) Param Optimization -Maximum number of times a given range is adjusted during the dataflow +Maximum number of times a given range is adjusted during the dataflow. -param=tm-max-aggregate-size= Common Joined UInteger Var(param_tm_max_aggregate_size) Init(9) Param Optimization diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog index 251af30..9d28b02 100644 --- a/gcc/testsuite/ChangeLog +++ b/gcc/testsuite/ChangeLog @@ -1,3 +1,15 @@ +2021-08-26 Roger Sayle <roger@nextmovesoftware.com> + + * gcc.dg/tree-ssa/ssa-ccp-41.c: New test case. + +2021-08-26 Martin Liska <mliska@suse.cz> + Stefan Kneifel <stefan.kneifel@bluewin.ch> + + * gcc.target/i386/mvc5.c: Scan assembly names. + * gcc.target/i386/mvc7.c: Likewise. + * gcc.target/i386/pr95778-1.c: Update scanned patterns. + * gcc.target/i386/pr95778-2.c: Likewise. + 2021-08-25 Martin Sebor <msebor@redhat.com> * gcc.dg/tree-ssa/evrp1.c: Add -details to dump option. 
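A note on the new test that follows: in foo, p = x & 24 can only be 0, 8, 16 or 24 (24 is 0b11000), so every feasible value of 1 << p misses bit 17 and r & (1 << 17) must fold to 0. A standalone sanity check of that arithmetic, illustrative only and not part of the testsuite:

  #include <assert.h>

  int
  main (void)
  {
    /* The four feasible values of x & 24.  */
    for (int p = 0; p <= 24; p += 8)
      assert (((1 << p) & (1 << 17)) == 0);
    return 0;
  }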
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/ssa-ccp-41.c b/gcc/testsuite/gcc.dg/tree-ssa/ssa-ccp-41.c new file mode 100644 index 0000000..d2b054e --- /dev/null +++ b/gcc/testsuite/gcc.dg/tree-ssa/ssa-ccp-41.c @@ -0,0 +1,11 @@ +/* { dg-do compile } */ +/* { dg-options "-O2 -fdump-tree-optimized" } */ + +int foo(int x) +{ + int p = x & 24; + int r = 1 << p; + return r & (1<<17); +} + +/* { dg-final { scan-tree-dump "return 0;" "optimized" } } */ diff --git a/gcc/testsuite/gcc.target/i386/avx512f-pr101472.c b/gcc/testsuite/gcc.target/i386/avx512f-pr101472.c new file mode 100644 index 0000000..89c6603 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/avx512f-pr101472.c @@ -0,0 +1,49 @@ +/* PR target/101472 */ +/* { dg-do compile } */ +/* { dg-options "-mavx512f -O2" } */ +/* { dg-final { scan-assembler-times "vpscatterqd\[ \\t\]+\[^\{\n\]*ymm\[0-9\]\[^\n\]*zmm\[0-9\]\[^\n\]*{%k\[1-7\]}(?:\n|\[ \\t\]+#)" 2 } } */ +/* { dg-final { scan-assembler-times "vpscatterdd\[ \\t\]+\[^\{\n\]*zmm\[0-9\]\[^\n\]*zmm\[0-9\]\[^\n\]*{%k\[1-7\]}(?:\n|\[ \\t\]+#)" 2 } } */ +/* { dg-final { scan-assembler-times "vpscatterqq\[ \\t\]+\[^\{\n\]*zmm\[0-9\]\[^\n\]*zmm\[0-9\]\[^\n\]*{%k\[1-7\]}(?:\n|\[ \\t\]+#)" 2 } } */ +/* { dg-final { scan-assembler-times "vpscatterdq\[ \\t\]+\[^\{\n\]*zmm\[0-9\]\[^\n\]*ymm\[0-9\]\[^\n\]*{%k\[1-7\]}(?:\n|\[ \\t\]+#)" 2 } } */ +/* { dg-final { scan-assembler-times "vscatterqps\[ \\t\]+\[^\{\n\]*ymm\[0-9\]\[^\n\]*zmm\[0-9\]\[^\n\]*{%k\[1-7\]}(?:\n|\[ \\t\]+#)" 2 } } */ +/* { dg-final { scan-assembler-times "vscatterdps\[ \\t\]+\[^\{\n\]*zmm\[0-9\]\[^\n\]*zmm\[0-9\]\[^\n\]*{%k\[1-7\]}(?:\n|\[ \\t\]+#)" 2 } } */ +/* { dg-final { scan-assembler-times "vscatterqpd\[ \\t\]+\[^\{\n\]*zmm\[0-9\]\[^\n\]*zmm\[0-9\]\[^\n\]*{%k\[1-7\]}(?:\n|\[ \\t\]+#)" 2 } } */ +/* { dg-final { scan-assembler-times "vscatterdpd\[ \\t\]+\[^\{\n\]*zmm\[0-9\]\[^\n\]*ymm\[0-9\]\[^\n\]*{%k\[1-7\]}(?:\n|\[ \\t\]+#)" 2 } } */ + +#include <immintrin.h> + +void two_scatters_epi32(void* addr, __mmask8 k1, __mmask8 k2, __m512i vindex, + __m256i a, __m512i b) +{ + _mm512_mask_i64scatter_epi32(addr, k1, vindex, a, 1); + _mm512_mask_i64scatter_epi32(addr, k2, vindex, a, 1); + _mm512_mask_i32scatter_epi32(addr, k1, vindex, b, 1); + _mm512_mask_i32scatter_epi32(addr, k2, vindex, b, 1); +} + +void two_scatters_epi64(void* addr, __mmask8 k1, __mmask8 k2, __m512i vindex, + __m256i idx, __m512i a) +{ + _mm512_mask_i64scatter_epi64(addr, k1, vindex, a, 1); + _mm512_mask_i64scatter_epi64(addr, k2, vindex, a, 1); + _mm512_mask_i32scatter_epi64(addr, k1, idx, a, 1); + _mm512_mask_i32scatter_epi64(addr, k2, idx, a, 1); +} + +void two_scatters_ps(void* addr, __mmask8 k1, __mmask8 k2, __m512i vindex, + __m256 a, __m512 b) +{ + _mm512_mask_i64scatter_ps(addr, k1, vindex, a, 1); + _mm512_mask_i64scatter_ps(addr, k2, vindex, a, 1); + _mm512_mask_i32scatter_ps(addr, k1, vindex, b, 1); + _mm512_mask_i32scatter_ps(addr, k2, vindex, b, 1); +} + +void two_scatters_pd(void* addr, __mmask8 k1, __mmask8 k2, __m512i vindex, + __m256i idx, __m512d a) +{ + _mm512_mask_i64scatter_pd(addr, k1, vindex, a, 1); + _mm512_mask_i64scatter_pd(addr, k2, vindex, a, 1); + _mm512_mask_i32scatter_pd(addr, k1, idx, a, 1); + _mm512_mask_i32scatter_pd(addr, k2, idx, a, 1); +} diff --git a/gcc/testsuite/gcc.target/i386/avx512f-vshufpd-1.c b/gcc/testsuite/gcc.target/i386/avx512f-vshufpd-1.c index d1ac01e..8df5b9d 100644 --- a/gcc/testsuite/gcc.target/i386/avx512f-vshufpd-1.c +++ b/gcc/testsuite/gcc.target/i386/avx512f-vshufpd-1.c @@ -7,11 +7,12 @@ #include <immintrin.h> 
__m512d x; +__m512d y; void extern avx512f_test (void) { - x = _mm512_shuffle_pd (x, x, 56); + x = _mm512_shuffle_pd (x, y, 56); x = _mm512_mask_shuffle_pd (x, 2, x, x, 56); x = _mm512_maskz_shuffle_pd (2, x, x, 56); } diff --git a/gcc/testsuite/gcc.target/i386/avx512f-vshufps-1.c b/gcc/testsuite/gcc.target/i386/avx512f-vshufps-1.c index 07a63fc..378ae4b 100644 --- a/gcc/testsuite/gcc.target/i386/avx512f-vshufps-1.c +++ b/gcc/testsuite/gcc.target/i386/avx512f-vshufps-1.c @@ -7,11 +7,12 @@ #include <immintrin.h> __m512 x; +__m512 y; void extern avx512f_test (void) { - x = _mm512_shuffle_ps (x, x, 56); + x = _mm512_shuffle_ps (x, y, 56); x = _mm512_mask_shuffle_ps (x, 2, x, x, 56); x = _mm512_maskz_shuffle_ps (2, x, x, 56); } diff --git a/gcc/testsuite/gcc.target/i386/avx512vl-pr101472.c b/gcc/testsuite/gcc.target/i386/avx512vl-pr101472.c new file mode 100644 index 0000000..6df59a2 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/avx512vl-pr101472.c @@ -0,0 +1,79 @@ +/* PR target/101472 */ +/* { dg-do compile } */ +/* { dg-options "-mavx512vl -O2" } */ +/* { dg-final { scan-assembler-times "vpscatterqd\[ \\t\]+\[^\{\n\]*xmm\[0-9\]\[^\n\]*xmm\[0-9\]\[^\n\]*{%k\[1-7\]}(?:\n|\[ \\t\]+#)" 2 } } */ +/* { dg-final { scan-assembler-times "vpscatterqd\[ \\t\]+\[^\{\n\]*xmm\[0-9\]\[^\n\]*ymm\[0-9\]\[^\n\]*{%k\[1-7\]}(?:\n|\[ \\t\]+#)" 2 } } */ +/* { dg-final { scan-assembler-times "vpscatterdd\[ \\t\]+\[^\{\n\]*xmm\[0-9\]\[^\n\]*xmm\[0-9\]\[^\n\]*{%k\[1-7\]}(?:\n|\[ \\t\]+#)" 2 } } */ +/* { dg-final { scan-assembler-times "vpscatterdd\[ \\t\]+\[^\{\n\]*ymm\[0-9\]\[^\n\]*ymm\[0-9\]\[^\n\]*{%k\[1-7\]}(?:\n|\[ \\t\]+#)" 2 } } */ +/* { dg-final { scan-assembler-times "vpscatterqq\[ \\t\]+\[^\{\n\]*xmm\[0-9\]\[^\n\]*xmm\[0-9\]\[^\n\]*{%k\[1-7\]}(?:\n|\[ \\t\]+#)" 2 } } */ +/* { dg-final { scan-assembler-times "vpscatterqq\[ \\t\]+\[^\{\n\]*ymm\[0-9\]\[^\n\]*ymm\[0-9\]\[^\n\]*{%k\[1-7\]}(?:\n|\[ \\t\]+#)" 2 } } */ +/* { dg-final { scan-assembler-times "vpscatterdq\[ \\t\]+\[^\{\n\]*xmm\[0-9\]\[^\n\]*xmm\[0-9\]\[^\n\]*{%k\[1-7\]}(?:\n|\[ \\t\]+#)" 2 } } */ +/* { dg-final { scan-assembler-times "vpscatterdq\[ \\t\]+\[^\{\n\]*ymm\[0-9\]\[^\n\]*xmm\[0-9\]\[^\n\]*{%k\[1-7\]}(?:\n|\[ \\t\]+#)" 2 } } */ +/* { dg-final { scan-assembler-times "vscatterqps\[ \\t\]+\[^\{\n\]*xmm\[0-9\]\[^\n\]*xmm\[0-9\]\[^\n\]*{%k\[1-7\]}(?:\n|\[ \\t\]+#)" 2 } } */ +/* { dg-final { scan-assembler-times "vscatterqps\[ \\t\]+\[^\{\n\]*xmm\[0-9\]\[^\n\]*ymm\[0-9\]\[^\n\]*{%k\[1-7\]}(?:\n|\[ \\t\]+#)" 2 } } */ +/* { dg-final { scan-assembler-times "vscatterdps\[ \\t\]+\[^\{\n\]*xmm\[0-9\]\[^\n\]*xmm\[0-9\]\[^\n\]*{%k\[1-7\]}(?:\n|\[ \\t\]+#)" 2 } } */ +/* { dg-final { scan-assembler-times "vscatterdps\[ \\t\]+\[^\{\n\]*ymm\[0-9\]\[^\n\]*ymm\[0-9\]\[^\n\]*{%k\[1-7\]}(?:\n|\[ \\t\]+#)" 2 } } */ +/* { dg-final { scan-assembler-times "vscatterqpd\[ \\t\]+\[^\{\n\]*xmm\[0-9\]\[^\n\]*xmm\[0-9\]\[^\n\]*{%k\[1-7\]}(?:\n|\[ \\t\]+#)" 2 } } */ +/* { dg-final { scan-assembler-times "vscatterqpd\[ \\t\]+\[^\{\n\]*ymm\[0-9\]\[^\n\]*ymm\[0-9\]\[^\n\]*{%k\[1-7\]}(?:\n|\[ \\t\]+#)" 2 } } */ +/* { dg-final { scan-assembler-times "vscatterdpd\[ \\t\]+\[^\{\n\]*xmm\[0-9\]\[^\n\]*xmm\[0-9\]\[^\n\]*{%k\[1-7\]}(?:\n|\[ \\t\]+#)" 2 } } */ +/* { dg-final { scan-assembler-times "vscatterdpd\[ \\t\]+\[^\{\n\]*ymm\[0-9\]\[^\n\]*xmm\[0-9\]\[^\n\]*{%k\[1-7\]}(?:\n|\[ \\t\]+#)" 2 } } */ + + +#include <immintrin.h> + +void two_scatters_epi32(void* addr, __mmask8 k1, __mmask8 k2, __m128i vindex1, + __m256i vindex2, __m128i src_epi32, + __m256i src_i32_epi32) +{ + 
_mm_mask_i64scatter_epi32(addr, k1, vindex1, src_epi32, 1); + _mm_mask_i64scatter_epi32(addr, k2, vindex1, src_epi32, 1); + _mm256_mask_i64scatter_epi32(addr, k1, vindex2, src_epi32, 1); + _mm256_mask_i64scatter_epi32(addr, k2, vindex2, src_epi32, 1); + + _mm_mask_i32scatter_epi32(addr, k1, vindex1, src_epi32, 1); + _mm_mask_i32scatter_epi32(addr, k2, vindex1, src_epi32, 1); + _mm256_mask_i32scatter_epi32(addr, k1, vindex2, src_i32_epi32, 1); + _mm256_mask_i32scatter_epi32(addr, k2, vindex2, src_i32_epi32, 1); +} + +void two_scatters_epi64(void* addr, __mmask8 k1, __mmask8 k2, __m128i vindex1, + __m256i vindex2, __m128i src_epi64_mm, + __m256i src_epi64) +{ + _mm_mask_i64scatter_epi64(addr, k1, vindex1, src_epi64_mm, 1); + _mm_mask_i64scatter_epi64(addr, k2, vindex1, src_epi64_mm, 1); + _mm256_mask_i64scatter_epi64(addr, k1, vindex2, src_epi64, 1); + _mm256_mask_i64scatter_epi64(addr, k2, vindex2, src_epi64, 1); + + _mm_mask_i32scatter_epi64(addr, k1, vindex1, src_epi64_mm, 8); + _mm_mask_i32scatter_epi64(addr, k2, vindex1, src_epi64_mm, 8); + _mm256_mask_i32scatter_epi64(addr, k1, vindex1, src_epi64, 1); + _mm256_mask_i32scatter_epi64(addr, k2, vindex1, src_epi64, 1); +} +void two_scatters_ps(void* addr, __mmask8 k1, __mmask8 k2, __m128i vindex1, + __m256i vindex2, __m128 src_ps, __m256 src_i32_ps) +{ + _mm_mask_i64scatter_ps(addr, k1, vindex1, src_ps, 1); + _mm_mask_i64scatter_ps(addr, k2, vindex1, src_ps, 1); + _mm256_mask_i64scatter_ps(addr, k1, vindex2, src_ps, 1); + _mm256_mask_i64scatter_ps(addr, k2, vindex2, src_ps, 1); + + _mm_mask_i32scatter_ps(addr, k1, vindex1, src_ps, 8); + _mm_mask_i32scatter_ps(addr, k2, vindex1, src_ps, 8); + _mm256_mask_i32scatter_ps(addr, k1, vindex2, src_i32_ps, 1); + _mm256_mask_i32scatter_ps(addr, k2, vindex2, src_i32_ps, 1); +} + +void two_scatters_pd(void* addr, __mmask8 k1, __mmask8 k2, __m128i vindex1, + __m256i vindex2, __m128d src_pd_mm, __m256d src_pd) +{ + _mm_mask_i64scatter_pd(addr, k1, vindex1, src_pd_mm, 1); + _mm_mask_i64scatter_pd(addr, k2, vindex1, src_pd_mm, 1); + _mm256_mask_i64scatter_pd(addr, k1, vindex2, src_pd, 1); + _mm256_mask_i64scatter_pd(addr, k2, vindex2, src_pd, 1); + + _mm_mask_i32scatter_pd(addr, k1, vindex1, src_pd_mm, 8); + _mm_mask_i32scatter_pd(addr, k2, vindex1, src_pd_mm, 8); + _mm256_mask_i32scatter_pd(addr, k1, vindex1, src_pd, 1); + _mm256_mask_i32scatter_pd(addr, k2, vindex1, src_pd, 1); +} diff --git a/gcc/testsuite/gcc.target/i386/pr43147.c b/gcc/testsuite/gcc.target/i386/pr43147.c new file mode 100644 index 0000000..3c30f91 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/pr43147.c @@ -0,0 +1,15 @@ +/* { dg-do compile } */ +/* { dg-options "-O2 -msse2" } */ +/* { dg-final { scan-assembler "movaps" } } */ +/* { dg-final { scan-assembler-not "shufps" } } */ + +#include <x86intrin.h> + +__m128 +foo (void) +{ + __m128 m = _mm_set_ps(1.0f, 2.0f, 3.0f, 4.0f); + m = _mm_shuffle_ps(m, m, 0xC9); + m = _mm_shuffle_ps(m, m, 0x2D); + return m; +} diff --git a/gcc/testsuite/gcc.target/powerpc/dive-vectorize-1.c b/gcc/testsuite/gcc.target/powerpc/dive-vectorize-1.c new file mode 100644 index 0000000..84f1b0a --- /dev/null +++ b/gcc/testsuite/gcc.target/powerpc/dive-vectorize-1.c @@ -0,0 +1,11 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target power10_ok } */ +/* { dg-options "-mdejagnu-cpu=power10 -O2 -ftree-vectorize -fno-vect-cost-model -fno-unroll-loops -fdump-tree-vect-details" } */ + +/* Test if signed/unsigned int extended divisions get vectorized. 
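+ The element-wise __builtin_divwe/__builtin_divweu loops in dive-vectorize-1.h are what the new rs6000_builtin_md_vectorized_function handling maps to the V4SI built-ins, hence the vdivesw/vdiveuw scans below. 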
*/ + +#include "dive-vectorize-1.h" + +/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 2 "vect" } } */ +/* { dg-final { scan-assembler-times {\mvdivesw\M} 1 } } */ +/* { dg-final { scan-assembler-times {\mvdiveuw\M} 1 } } */ diff --git a/gcc/testsuite/gcc.target/powerpc/dive-vectorize-1.h b/gcc/testsuite/gcc.target/powerpc/dive-vectorize-1.h new file mode 100644 index 0000000..119f637 --- /dev/null +++ b/gcc/testsuite/gcc.target/powerpc/dive-vectorize-1.h @@ -0,0 +1,22 @@ +#define N 128 + +typedef signed int si; +typedef unsigned int ui; + +si si_a[N], si_b[N], si_c[N]; +ui ui_a[N], ui_b[N], ui_c[N]; + +__attribute__ ((noipa)) void +test_divwe () +{ + for (int i = 0; i < N; i++) + si_c[i] = __builtin_divwe (si_a[i], si_b[i]); +} + +__attribute__ ((noipa)) void +test_divweu () +{ + for (int i = 0; i < N; i++) + ui_c[i] = __builtin_divweu (ui_a[i], ui_b[i]); +} + diff --git a/gcc/testsuite/gcc.target/powerpc/dive-vectorize-2.c b/gcc/testsuite/gcc.target/powerpc/dive-vectorize-2.c new file mode 100644 index 0000000..13d768d --- /dev/null +++ b/gcc/testsuite/gcc.target/powerpc/dive-vectorize-2.c @@ -0,0 +1,13 @@ +/* { dg-do compile } */ +/* We scan for vdive*d which are only supported on 64-bit env. */ +/* { dg-require-effective-target lp64 } */ +/* { dg-require-effective-target power10_ok } */ +/* { dg-options "-mdejagnu-cpu=power10 -O2 -ftree-vectorize -fno-vect-cost-model -fno-unroll-loops -fdump-tree-vect-details" } */ + +/* Test if signed/unsigned long long extended divisions get vectorized. */ + +#include "dive-vectorize-2.h" + +/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 2 "vect" } } */ +/* { dg-final { scan-assembler-times {\mvdivesd\M} 1 } } */ +/* { dg-final { scan-assembler-times {\mvdiveud\M} 1 } } */ diff --git a/gcc/testsuite/gcc.target/powerpc/dive-vectorize-2.h b/gcc/testsuite/gcc.target/powerpc/dive-vectorize-2.h new file mode 100644 index 0000000..1cab56b --- /dev/null +++ b/gcc/testsuite/gcc.target/powerpc/dive-vectorize-2.h @@ -0,0 +1,22 @@ +#define N 128 + +typedef signed long long sLL; +typedef unsigned long long uLL; + +sLL sll_a[N], sll_b[N], sll_c[N]; +uLL ull_a[N], ull_b[N], ull_c[N]; + +__attribute__ ((noipa)) void +test_divde () +{ + for (int i = 0; i < N; i++) + sll_c[i] = __builtin_divde (sll_a[i], sll_b[i]); +} + +__attribute__ ((noipa)) void +test_divdeu () +{ + for (int i = 0; i < N; i++) + ull_c[i] = __builtin_divdeu (ull_a[i], ull_b[i]); +} + diff --git a/gcc/testsuite/gcc.target/powerpc/dive-vectorize-run-1.c b/gcc/testsuite/gcc.target/powerpc/dive-vectorize-run-1.c new file mode 100644 index 0000000..dab112c --- /dev/null +++ b/gcc/testsuite/gcc.target/powerpc/dive-vectorize-run-1.c @@ -0,0 +1,54 @@ +/* { dg-do run } */ +/* { dg-require-effective-target power10_hw } */ +/* { dg-options "-mdejagnu-cpu=power10 -O2 -ftree-vectorize -fno-vect-cost-model" } */ + +#include "dive-vectorize-1.h" + +/* Check if test cases with signed/unsigned int extended division + vectorization run successfully. */ + +/* Make optimize (1) to avoid vectorization applied on check func. 
*/ + +__attribute__ ((optimize (1))) void +check_divwe () +{ + test_divwe (); + for (int i = 0; i < N; i++) + { + si exp = __builtin_divwe (si_a[i], si_b[i]); + if (exp != si_c[i]) + __builtin_abort (); + } +} + +__attribute__ ((optimize (1))) void +check_divweu () +{ + test_divweu (); + for (int i = 0; i < N; i++) + { + ui exp = __builtin_divweu (ui_a[i], ui_b[i]); + if (exp != ui_c[i]) + __builtin_abort (); + } +} + +int +main () +{ + for (int i = 0; i < N; i++) + { + si_a[i] = 0x10 * (i * 3 + 2); + si_b[i] = 0x7890 * (i * 3 + 1); + ui_a[i] = 0x234 * (i * 11 + 3) - 0xcd * (i * 5 - 7); + ui_b[i] = 0x6078 * (i * 7 + 3) + 0xef * (i * 7 - 11); + if (si_b[i] == 0 || ui_b[i] == 0) + __builtin_abort (); + } + + check_divwe (); + check_divweu (); + + return 0; +} + diff --git a/gcc/testsuite/gcc.target/powerpc/dive-vectorize-run-2.c b/gcc/testsuite/gcc.target/powerpc/dive-vectorize-run-2.c new file mode 100644 index 0000000..eb76149 --- /dev/null +++ b/gcc/testsuite/gcc.target/powerpc/dive-vectorize-run-2.c @@ -0,0 +1,56 @@ +/* { dg-do run } */ +/* The checked bifs are only supported on 64-bit env. */ +/* { dg-require-effective-target lp64 } */ +/* { dg-require-effective-target power10_hw } */ +/* { dg-options "-mdejagnu-cpu=power10 -O2 -ftree-vectorize -fno-vect-cost-model" } */ + +#include "dive-vectorize-2.h" + +/* Check if test cases with signed/unsigned long long extended division + vectorization run successfully. */ + +/* Make optimize (1) to avoid vectorization applied on check func. */ + +__attribute__ ((optimize (1))) void +check_divde () +{ + test_divde (); + for (int i = 0; i < N; i++) + { + sLL exp = __builtin_divde (sll_a[i], sll_b[i]); + if (exp != sll_c[i]) + __builtin_abort (); + } +} + +__attribute__ ((optimize (1))) void +check_divdeu () +{ + test_divdeu (); + for (int i = 0; i < N; i++) + { + uLL exp = __builtin_divdeu (ull_a[i], ull_b[i]); + if (exp != ull_c[i]) + __builtin_abort (); + } +} + +int +main () +{ + for (int i = 0; i < N; i++) + { + sll_a[i] = 0x102 * (i * 3 + 2); + sll_b[i] = 0x789ab * (i * 3 + 1); + ull_a[i] = 0x2345 * (i * 11 + 3) - 0xcd1 * (i * 5 - 7); + ull_b[i] = 0x6078e * (i * 7 + 3) + 0xefa * (i * 7 - 11); + if (sll_b[i] == 0 || ull_b[i] == 0) + __builtin_abort (); + } + + check_divde (); + check_divdeu (); + + return 0; +} + diff --git a/gcc/testsuite/gcc.target/powerpc/p10-bifs-vectorize-1.c b/gcc/testsuite/gcc.target/powerpc/p10-bifs-vectorize-1.c new file mode 100644 index 0000000..fdbb9eb --- /dev/null +++ b/gcc/testsuite/gcc.target/powerpc/p10-bifs-vectorize-1.c @@ -0,0 +1,16 @@ +/* { dg-do compile } */ +/* What we scan for is only supported on 64-bit env. */ +/* { dg-require-effective-target lp64 } */ +/* { dg-require-effective-target power10_ok } */ +/* { dg-options "-mdejagnu-cpu=power10 -O2 -ftree-vectorize -fno-vect-cost-model -fno-unroll-loops -fdump-tree-vect-details" } */ + +/* Test if some Power10 built-in functions get vectorized. 
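+ Each scalar built-in exercised here (cfuged, cntlzdm, cnttzdm, pdepd, pextd) is mapped to its vector counterpart by rs6000_builtin_md_vectorized_function, hence the vcfuged/vclzdm/vctzdm/vpdepd/vpextd scans below. 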
*/ + +#include "p10-bifs-vectorize-1.h" + +/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 5 "vect" } } */ +/* { dg-final { scan-assembler-times {\mvcfuged\M} 1 } } */ +/* { dg-final { scan-assembler-times {\mvclzdm\M} 1 } } */ +/* { dg-final { scan-assembler-times {\mvctzdm\M} 1 } } */ +/* { dg-final { scan-assembler-times {\mvpdepd\M} 1 } } */ +/* { dg-final { scan-assembler-times {\mvpextd\M} 1 } } */ diff --git a/gcc/testsuite/gcc.target/powerpc/p10-bifs-vectorize-1.h b/gcc/testsuite/gcc.target/powerpc/p10-bifs-vectorize-1.h new file mode 100644 index 0000000..80b7aac --- /dev/null +++ b/gcc/testsuite/gcc.target/powerpc/p10-bifs-vectorize-1.h @@ -0,0 +1,40 @@ +#define N 32 + +typedef unsigned long long uLL; +uLL ull_a[N], ull_b[N], ull_c[N]; + +__attribute__ ((noipa)) void +test_cfuged () +{ + for (int i = 0; i < N; i++) + ull_c[i] = __builtin_cfuged (ull_a[i], ull_b[i]); +} + +__attribute__ ((noipa)) void +test_cntlzdm () +{ + for (int i = 0; i < N; i++) + ull_c[i] = __builtin_cntlzdm (ull_a[i], ull_b[i]); +} + +__attribute__ ((noipa)) void +test_cnttzdm () +{ + for (int i = 0; i < N; i++) + ull_c[i] = __builtin_cnttzdm (ull_a[i], ull_b[i]); +} + +__attribute__ ((noipa)) void +test_pdepd () +{ + for (int i = 0; i < N; i++) + ull_c[i] = __builtin_pdepd (ull_a[i], ull_b[i]); +} + +__attribute__ ((noipa)) void +test_pextd () +{ + for (int i = 0; i < N; i++) + ull_c[i] = __builtin_pextd (ull_a[i], ull_b[i]); +} + diff --git a/gcc/testsuite/gcc.target/powerpc/p10-bifs-vectorize-run-1.c b/gcc/testsuite/gcc.target/powerpc/p10-bifs-vectorize-run-1.c new file mode 100644 index 0000000..828fbe1 --- /dev/null +++ b/gcc/testsuite/gcc.target/powerpc/p10-bifs-vectorize-run-1.c @@ -0,0 +1,48 @@ +/* { dg-do run } */ +/* The checked bifs are only supported on 64-bit env. */ +/* { dg-require-effective-target lp64 } */ +/* { dg-require-effective-target power10_hw } */ +/* { dg-options "-mdejagnu-cpu=power10 -O2 -ftree-vectorize -fno-vect-cost-model" } */ + +#include "p10-bifs-vectorize-1.h" + +/* Check if vectorized built-in functions run expectedly. */ + +/* Make optimize (1) to avoid vectorization applied on check func. */ + +#define CHECK(name) \ + __attribute__ ((optimize (1))) void check_##name () \ + { \ + test_##name (); \ + for (int i = 0; i < N; i++) \ + { \ + uLL exp = __builtin_##name (ull_a[i], ull_b[i]); \ + if (exp != ull_c[i]) \ + __builtin_abort (); \ + } \ + } + +CHECK (cfuged) +CHECK (cntlzdm) +CHECK (cnttzdm) +CHECK (pdepd) +CHECK (pextd) + +int +main () +{ + for (int i = 0; i < N; i++) + { + ull_a[i] = 0x789a * (i * 11 - 5) - 0xcd1 * (i * 5 - 7); + ull_b[i] = 0xfedc * (i * 7 + 3) + 0x467 * (i * 7 - 11); + } + + check_cfuged (); + check_cntlzdm (); + check_cnttzdm (); + check_pdepd (); + check_pextd (); + + return 0; +} + diff --git a/gcc/tree-ssa-ccp.c b/gcc/tree-ssa-ccp.c index 1a94aeb..f4a99ac 100644 --- a/gcc/tree-ssa-ccp.c +++ b/gcc/tree-ssa-ccp.c @@ -1448,6 +1448,34 @@ bit_value_mult_const (signop sgn, int width, *mask = wi::ext (sum_mask, width, sgn); } +/* Fill up to MAX values in the BITS array with values representing + each of the non-zero bits in the value X. Returns the number of + bits in X (capped at the maximum value MAX). For example, an X + value 11, places 1, 2 and 8 in BITS and returns the value 3. 
*/ + +unsigned int +get_individual_bits (widest_int *bits, widest_int x, unsigned int max) +{ + unsigned int count = 0; + while (count < max && x != 0) + { + int bitpos = wi::ctz (x); + bits[count] = wi::lshift (1, bitpos); + x ^= bits[count]; + count++; + } + return count; +} + +/* Array of 2^N - 1 values representing the bits flipped between + consecutive Gray codes. This is used to efficiently enumerate + all permutations on N bits using XOR. */ +static const unsigned char gray_code_bit_flips[63] = { + 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, + 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, + 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, + 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0 +}; /* Apply the operation CODE in type TYPE to the value, mask pairs R1VAL, R1MASK and R2VAL, R2MASK representing a values of type R1TYPE @@ -1525,6 +1553,48 @@ bit_value_binop (enum tree_code code, signop sgn, int width, } } } + else if (wi::ltu_p (r2val | r2mask, width) + && wi::popcount (r2mask) <= 4) + { + widest_int bits[4]; + widest_int res_val, res_mask; + widest_int tmp_val, tmp_mask; + widest_int shift = wi::bit_and_not (r2val, r2mask); + unsigned int bit_count = get_individual_bits (bits, r2mask, 4); + unsigned int count = (1 << bit_count) - 1; + + /* Initialize result to rotate by smallest value of shift. */ + if (code == RROTATE_EXPR) + { + res_mask = wi::rrotate (r1mask, shift, width); + res_val = wi::rrotate (r1val, shift, width); + } + else + { + res_mask = wi::lrotate (r1mask, shift, width); + res_val = wi::lrotate (r1val, shift, width); + } + + /* Iterate through the remaining values of shift. */ + for (unsigned int i=0; i<count; i++) + { + shift ^= bits[gray_code_bit_flips[i]]; + if (code == RROTATE_EXPR) + { + tmp_mask = wi::rrotate (r1mask, shift, width); + tmp_val = wi::rrotate (r1val, shift, width); + } + else + { + tmp_mask = wi::lrotate (r1mask, shift, width); + tmp_val = wi::lrotate (r1val, shift, width); + } + /* Accumulate the result. */ + res_mask |= tmp_mask | (res_val ^ tmp_val); + } + *val = wi::bit_and_not (res_val, res_mask); + *mask = res_mask; + } break; case LSHIFT_EXPR: @@ -1556,6 +1626,96 @@ bit_value_binop (enum tree_code code, signop sgn, int width, } } } + else if (wi::ltu_p (r2val | r2mask, width)) + { + if (wi::popcount (r2mask) <= 4) + { + widest_int bits[4]; + widest_int arg_val, arg_mask; + widest_int res_val, res_mask; + widest_int tmp_val, tmp_mask; + widest_int shift = wi::bit_and_not (r2val, r2mask); + unsigned int bit_count = get_individual_bits (bits, r2mask, 4); + unsigned int count = (1 << bit_count) - 1; + + /* Initialize result to shift by smallest value of shift. */ + if (code == RSHIFT_EXPR) + { + arg_mask = wi::ext (r1mask, width, sgn); + arg_val = wi::ext (r1val, width, sgn); + res_mask = wi::rshift (arg_mask, shift, sgn); + res_val = wi::rshift (arg_val, shift, sgn); + } + else + { + arg_mask = r1mask; + arg_val = r1val; + res_mask = arg_mask << shift; + res_val = arg_val << shift; + } + + /* Iterate through the remaining values of shift. */ + for (unsigned int i=0; i<count; i++) + { + shift ^= bits[gray_code_bit_flips[i]]; + if (code == RSHIFT_EXPR) + { + tmp_mask = wi::rshift (arg_mask, shift, sgn); + tmp_val = wi::rshift (arg_val, shift, sgn); + } + else + { + tmp_mask = arg_mask << shift; + tmp_val = arg_val << shift; + } + /* Accumulate the result. 
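+ Any bit on which two enumerated results disagree is forced into the mask (unknown); only bits on which every feasible shift agrees stay known. 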
*/ + res_mask |= tmp_mask | (res_val ^ tmp_val); + } + res_mask = wi::ext (res_mask, width, sgn); + res_val = wi::ext (res_val, width, sgn); + *val = wi::bit_and_not (res_val, res_mask); + *mask = res_mask; + } + else if ((r1val | r1mask) == 0) + { + /* Handle shifts of zero to avoid undefined wi::ctz below. */ + *mask = 0; + *val = 0; + } + else if (code == LSHIFT_EXPR) + { + widest_int tmp = wi::mask <widest_int> (width, false); + tmp <<= wi::ctz (r1val | r1mask); + tmp <<= wi::bit_and_not (r2val, r2mask); + *mask = wi::ext (tmp, width, sgn); + *val = 0; + } + else if (!wi::neg_p (r1val | r1mask, sgn)) + { + /* Logical right shift, or zero sign bit. */ + widest_int arg = r1val | r1mask; + int lzcount = wi::clz (arg); + lzcount -= wi::get_precision (arg) - width; + widest_int tmp = wi::mask <widest_int> (width, false); + tmp = wi::lrshift (tmp, lzcount); + tmp = wi::lrshift (tmp, wi::bit_and_not (r2val, r2mask)); + *mask = wi::ext (tmp, width, sgn); + *val = 0; + } + else if (!wi::neg_p (r1mask)) + { + /* Arithmetic right shift with set sign bit. */ + widest_int arg = wi::bit_and_not (r1val, r1mask); + int sbcount = wi::clrsb (arg); + sbcount -= wi::get_precision (arg) - width; + widest_int tmp = wi::mask <widest_int> (width, false); + tmp = wi::lrshift (tmp, sbcount); + tmp = wi::lrshift (tmp, wi::bit_and_not (r2val, r2mask)); + *mask = wi::sext (tmp, width); + tmp = wi::bit_not (tmp); + *val = wi::sext (tmp, width); + } + } break; case PLUS_EXPR: diff --git a/gcc/tree-ssa-uninit.c b/gcc/tree-ssa-uninit.c index ad2cf48..394dbf4 100644 --- a/gcc/tree-ssa-uninit.c +++ b/gcc/tree-ssa-uninit.c @@ -131,20 +131,19 @@ uninit_undefined_value_p (tree t) again for plain uninitialized variables, since optimization may have changed conditionally uninitialized to unconditionally uninitialized. */ -/* Emit a warning for EXPR based on variable VAR at the point in the - program T, an SSA_NAME, is used being uninitialized. The exact - warning text is in MSGID and DATA is the gimple stmt with info about - the location in source code. When DATA is a GIMPLE_PHI, PHIARG_IDX - gives which argument of the phi node to take the location from. WC - is the warning code. */ +/* Emit warning OPT for variable VAR at the point in the program where + the SSA_NAME T is being used uninitialized. The warning text is in + MSGID and STMT is the statement that does the uninitialized read. + PHI_ARG_LOC is the location of the PHI argument if T and VAR are one, + or UNKNOWN_LOCATION otherwise. */ static void -warn_uninit (enum opt_code wc, tree t, tree expr, tree var, - const char *gmsgid, void *data, location_t phiarg_loc) +warn_uninit (opt_code opt, tree t, tree var, const char *gmsgid, + gimple *context, location_t phi_arg_loc = UNKNOWN_LOCATION) { - gimple *context = (gimple *) data; - location_t location, cfun_loc; - expanded_location xloc, floc; + /* Bail if the value isn't provably uninitialized. 
*/ + if (!has_undefined_value_p (t)) + return; /* Ignore COMPLEX_EXPR as initializing only a part of a complex turns into a COMPLEX_EXPR with the uninitialized part being undefined. */ if (is_gimple_assign (context) && gimple_assign_rhs_code (context) == COMPLEX_EXPR) return; - if (!has_undefined_value_p (t)) - return; - /* Anonymous SSA_NAMEs shouldn't be uninitialized, but ssa_undefined_value_p can return true if the def stmt of anonymous SSA_NAME is COMPLEX_EXPR + can return true if the def stmt of an anonymous SSA_NAME is COMPLEX_EXPR created for conversion from scalar to complex. Use the underlying var of the COMPLEX_EXPRs real part in that case. See PR71581. */ - if (expr == NULL_TREE - && var == NULL_TREE - && SSA_NAME_VAR (t) == NULL_TREE - && is_gimple_assign (SSA_NAME_DEF_STMT (t)) - && gimple_assign_rhs_code (SSA_NAME_DEF_STMT (t)) == COMPLEX_EXPR) - { - tree v = gimple_assign_rhs1 (SSA_NAME_DEF_STMT (t)); - if (TREE_CODE (v) == SSA_NAME - && has_undefined_value_p (v) - && zerop (gimple_assign_rhs2 (SSA_NAME_DEF_STMT (t)))) + if (!var && !SSA_NAME_VAR (t)) + { + gimple *def_stmt = SSA_NAME_DEF_STMT (t); + if (is_gimple_assign (def_stmt) + && gimple_assign_rhs_code (def_stmt) == COMPLEX_EXPR) { - expr = SSA_NAME_VAR (v); - var = expr; + tree v = gimple_assign_rhs1 (def_stmt); + if (TREE_CODE (v) == SSA_NAME + && has_undefined_value_p (v) + && zerop (gimple_assign_rhs2 (def_stmt))) + var = SSA_NAME_VAR (v); } } - if (expr == NULL_TREE) + if (var == NULL_TREE) return; - /* TREE_NO_WARNING either means we already warned, or the front end - wishes to suppress the warning. */ - if ((context - && (warning_suppressed_p (context, OPT_Wuninitialized) - || (gimple_assign_single_p (context) - && get_no_uninit_warning (gimple_assign_rhs1 (context))))) - || get_no_uninit_warning (expr)) + /* Avoid warning if we've already done so or if the warning has been + suppressed. */ + if (((warning_suppressed_p (context, OPT_Wuninitialized) + || (gimple_assign_single_p (context) + && get_no_uninit_warning (gimple_assign_rhs1 (context))))) + || get_no_uninit_warning (var)) return; - if (context != NULL && gimple_has_location (context)) + /* Use either the location of the read statement or that of the PHI + argument, or that of the uninitialized variable, in that order, + whichever is valid. */ + location_t location; + if (gimple_has_location (context)) location = gimple_location (context); - else if (phiarg_loc != UNKNOWN_LOCATION) - location = phiarg_loc; + else if (phi_arg_loc != UNKNOWN_LOCATION) + location = phi_arg_loc; else location = DECL_SOURCE_LOCATION (var); location = linemap_resolve_location (line_table, location, LRK_SPELLING_LOCATION, NULL); - cfun_loc = DECL_SOURCE_LOCATION (cfun->decl); - xloc = expand_location (location); - floc = expand_location (cfun_loc); + auto_diagnostic_group d; - if (warning_at (location, wc, gmsgid, expr)) - { - suppress_warning (expr, wc); + if (!warning_at (location, opt, gmsgid, var)) + return; - if (location == DECL_SOURCE_LOCATION (var)) - return; - if (xloc.file != floc.file - || linemap_location_before_p (line_table, location, cfun_loc) - || linemap_location_before_p (line_table, cfun->function_end_locus, + /* Avoid subsequent warnings for reads of the same variable. */ + suppress_warning (var, opt); + + /* Issue a note pointing to the declaration of the variable unless the warning + is at the same location. 
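+ The note is also omitted when the warning already lies between the function's start and end in the same file, where the declaration is easy to spot. 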
*/ + location_t var_loc = DECL_SOURCE_LOCATION (var); + if (location == var_loc) + return; + + location_t cfun_loc = DECL_SOURCE_LOCATION (cfun->decl); + expanded_location xloc = expand_location (location); + expanded_location floc = expand_location (cfun_loc); + if (xloc.file != floc.file + || linemap_location_before_p (line_table, location, cfun_loc) + || linemap_location_before_p (line_table, cfun->function_end_locus, location)) - inform (DECL_SOURCE_LOCATION (var), "%qD was declared here", var); - } + inform (var_loc, "%qD was declared here", var); } struct check_defs_data @@ -845,13 +848,14 @@ warn_uninit_phi_uses (basic_block bb) } if (use_stmt) warn_uninit (OPT_Wuninitialized, def, SSA_NAME_VAR (def), - SSA_NAME_VAR (def), - "%qD is used uninitialized", use_stmt, - UNKNOWN_LOCATION); + "%qD is used uninitialized", use_stmt); } } -static unsigned int +/* Issue warnings about reads of uninitialized variables. WMAYBE_UNINIT + is true to issue -Wmaybe-uninitialized, otherwise -Wuninitialized. */ + +static void warn_uninitialized_vars (bool wmaybe_uninit) { /* Counters and limits controlling the depth of the warning. */ @@ -871,15 +875,13 @@ warn_uninitialized_vars (bool wmaybe_uninit) for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple *stmt = gsi_stmt (gsi); - use_operand_p use_p; - ssa_op_iter op_iter; - tree use; - if (is_gimple_debug (stmt)) continue; /* We only do data flow with SSA_NAMEs, so that's all we can warn about. */ + use_operand_p use_p; + ssa_op_iter op_iter; FOR_EACH_SSA_USE_OPERAND (use_p, stmt, op_iter, SSA_OP_USE) { /* BIT_INSERT_EXPR first operand should not be considered @@ -890,17 +892,13 @@ warn_uninitialized_vars (bool wmaybe_uninit) && use_p->use == gimple_assign_rhs1_ptr (ass)) continue; } - use = USE_FROM_PTR (use_p); + tree use = USE_FROM_PTR (use_p); if (wlims.always_executed) warn_uninit (OPT_Wuninitialized, use, SSA_NAME_VAR (use), - SSA_NAME_VAR (use), - "%qD is used uninitialized", stmt, - UNKNOWN_LOCATION); + "%qD is used uninitialized", stmt); else if (wmaybe_uninit) warn_uninit (OPT_Wmaybe_uninitialized, use, SSA_NAME_VAR (use), - SSA_NAME_VAR (use), - "%qD may be used uninitialized", - stmt, UNKNOWN_LOCATION); + "%qD may be used uninitialized", stmt); } /* For limiting the alias walk below we count all @@ -930,8 +928,6 @@ warn_uninitialized_vars (bool wmaybe_uninit) } } } - - return 0; } /* Checks if the operand OPND of PHI is defined by @@ -943,16 +939,13 @@ warn_uninitialized_vars (bool wmaybe_uninit) static bool can_skip_redundant_opnd (tree opnd, gimple *phi) { - gimple *op_def; - tree phi_def; - int i, n; - - phi_def = gimple_phi_result (phi); - op_def = SSA_NAME_DEF_STMT (opnd); + tree phi_def = gimple_phi_result (phi); + gimple *op_def = SSA_NAME_DEF_STMT (opnd); if (gimple_code (op_def) != GIMPLE_PHI) return false; - n = gimple_phi_num_args (op_def); - for (i = 0; i < n; ++i) + + unsigned n = gimple_phi_num_args (op_def); + for (unsigned i = 0; i < n; ++i) { tree op = gimple_phi_arg_def (op_def, i); if (TREE_CODE (op) != SSA_NAME) @@ -970,15 +963,14 @@ can_skip_redundant_opnd (tree opnd, gimple *phi) static unsigned compute_uninit_opnds_pos (gphi *phi) { - size_t i, n; unsigned uninit_opnds = 0; - n = gimple_phi_num_args (phi); + unsigned n = gimple_phi_num_args (phi); /* Bail out for phi with too many args. 
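 The uninitialized-argument positions are returned as bits of a fixed-width mask, so a PHI with more arguments than the mask can represent is simply not analyzed. 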
*/ if (n > max_phi_args) return 0; - for (i = 0; i < n; ++i) + for (unsigned i = 0; i < n; ++i) { tree op = gimple_phi_arg_def (phi, i); if (TREE_CODE (op) == SSA_NAME @@ -998,8 +990,7 @@ compute_uninit_opnds_pos (gphi *phi) return uninit_opnds; } -/* Find the immediate postdominator PDOM of the specified - basic block BLOCK. */ +/* Find the immediate postdominator of the specified basic block BLOCK. */ static inline basic_block find_pdom (basic_block block) @@ -1015,7 +1006,7 @@ find_pdom (basic_block block) } } -/* Find the immediate DOM of the specified basic block BLOCK. */ +/* Find the immediate dominator of the specified basic block BLOCK. */ static inline basic_block find_dom (basic_block block) @@ -1053,9 +1044,7 @@ is_non_loop_exit_postdominating (basic_block bb1, basic_block bb2) static inline basic_block find_control_equiv_block (basic_block bb) { - basic_block pdom; - - pdom = find_pdom (bb); + basic_block pdom = find_pdom (bb); /* Skip the postdominating bb that is also loop exit. */ if (!is_non_loop_exit_postdominating (pdom, bb)) @@ -3193,18 +3182,11 @@ static void warn_uninitialized_phi (gphi *phi, vec<gphi *> *worklist, hash_set<gphi *> *added_to_worklist) { - unsigned uninit_opnds; - gimple *uninit_use_stmt = 0; - tree uninit_op; - int phiarg_index; - location_t loc; - /* Don't look at virtual operands. */ if (virtual_operand_p (gimple_phi_result (phi))) return; - uninit_opnds = compute_uninit_opnds_pos (phi); - + unsigned uninit_opnds = compute_uninit_opnds_pos (phi); if (MASK_EMPTY (uninit_opnds)) return; @@ -3215,25 +3197,23 @@ warn_uninitialized_phi (gphi *phi, vec<gphi *> *worklist, } /* Now check if we have any use of the value without proper guard. */ - uninit_use_stmt = find_uninit_use (phi, uninit_opnds, - worklist, added_to_worklist); + gimple *uninit_use_stmt = find_uninit_use (phi, uninit_opnds, + worklist, added_to_worklist); /* All uses are properly guarded. */ if (!uninit_use_stmt) return; - phiarg_index = MASK_FIRST_SET_BIT (uninit_opnds); - uninit_op = gimple_phi_arg_def (phi, phiarg_index); + int phiarg_index = MASK_FIRST_SET_BIT (uninit_opnds); + tree uninit_op = gimple_phi_arg_def (phi, phiarg_index); if (SSA_NAME_VAR (uninit_op) == NULL_TREE) return; - if (gimple_phi_arg_has_location (phi, phiarg_index)) - loc = gimple_phi_arg_location (phi, phiarg_index); - else - loc = UNKNOWN_LOCATION; - warn_uninit (OPT_Wmaybe_uninitialized, uninit_op, SSA_NAME_VAR (uninit_op), + + location_t phi_arg_loc = gimple_phi_arg_location (phi, phiarg_index); + warn_uninit (OPT_Wmaybe_uninitialized, uninit_op, SSA_NAME_VAR (uninit_op), "%qD may be used uninitialized in this function", - uninit_use_stmt, loc); + uninit_use_stmt, phi_arg_loc); } static bool diff --git a/libgcc/ChangeLog b/libgcc/ChangeLog index 32556aa..e47bade 100644 --- a/libgcc/ChangeLog +++ b/libgcc/ChangeLog @@ -1,3 +1,16 @@ +2021-08-26 David Edelsohn <dje.gcc@gmail.com> + + * config/rs6000/ibm-ldouble.c (ldouble_qadd_internal): Rename from + __gcc_qadd. + (__gcc_qadd): Call ldouble_qadd_internal. + (__gcc_qsub): Call ldouble_qadd_internal with second long double + argument negated. + +2021-08-26 Jonathan Yong <10walls@gmail.com> + + * config/i386/cygming-crtend.c: Fix register_frame_ctor + and register_frame_dtor warnings. 
+ 2021-08-24 Richard Earnshaw <rearnsha@arm.com> PR target/102035 diff --git a/libgcc/config/rs6000/ibm-ldouble.c b/libgcc/config/rs6000/ibm-ldouble.c index 4c13453..0b385aa 100644 --- a/libgcc/config/rs6000/ibm-ldouble.c +++ b/libgcc/config/rs6000/ibm-ldouble.c @@ -118,8 +118,8 @@ pack_ldouble (double dh, double dl) } /* Add two 'IBM128_TYPE' values and return the result. */ -IBM128_TYPE -__gcc_qadd (double a, double aa, double c, double cc) +static inline IBM128_TYPE +ldouble_qadd_internal (double a, double aa, double c, double cc) { double xh, xl, z, q, zz; @@ -158,9 +158,15 @@ __gcc_qadd (double a, double aa, double c, double cc) } IBM128_TYPE -__gcc_qsub (double a, double b, double c, double d) +__gcc_qadd (double a, double aa, double c, double cc) +{ + return ldouble_qadd_internal (a, aa, c, cc); +} + +IBM128_TYPE +__gcc_qsub (double a, double aa, double c, double cc) { - return __gcc_qadd (a, b, -c, -d); + return ldouble_qadd_internal (a, aa, -c, -cc); } #ifdef __NO_FPRS__ diff --git a/libstdc++-v3/ChangeLog b/libstdc++-v3/ChangeLog index 70f97a5..7543480 100644 --- a/libstdc++-v3/ChangeLog +++ b/libstdc++-v3/ChangeLog @@ -1,3 +1,45 @@ +2021-08-26 Jonathan Wakely <jwakely@redhat.com> + + * include/bits/std_function.h (function): Adjust doxygen + comments. + * include/bits/unique_ptr.h (make_unique_for_overwrite): + Change parameter name to match doxygen comment. + +2021-08-26 Jonathan Wakely <jwakely@redhat.com> + + * include/bits/std_function.h (_function_base::_Base_manager): + Replace _M_init_functor with a function template using a + forwarding reference, and a pair of _M_create function + templates. Reuse _M_create for the clone operation. + (function::_Decay_t): New alias template. + (function::_Callable): Simplify by using _Decay. + (function::function(F)): Change parameter to forwarding + reference, as per LWG 2447. Add noexcept-specifier. Simplify + constraints. + (function::operator=(F&&)): Add noexcept-specifier. + * testsuite/20_util/function/cons/lwg2774.cc: New test. + * testsuite/20_util/function/cons/noexcept.cc: New test. + +2021-08-26 Jonathan Wakely <jwakely@redhat.com> + + * include/bits/std_function.h (function::function(F)): Add + static assertions to check constructibility requirements. + +2021-08-26 Jonathan Wakely <jwakely@redhat.com> + + PR libstdc++/100285 + * configure.ac: Check for O_NONBLOCK. + * configure: Regenerate. + * include/experimental/internet: Include <ws2tcpip.h> for + Windows. Use preprocessor conditions around more constants. + * include/experimental/socket: Use preprocessor conditions + around more constants. + * testsuite/experimental/net/internet/resolver/base.cc: Only use + constants when the corresponding C macro is defined. + * testsuite/experimental/net/socket/basic_socket.cc: Likewise. + * testsuite/experimental/net/socket/socket_base.cc: Likewise. + Make preprocessor checks more fine-grained. + 2021-08-25 Jonathan Wakely <jwakely@redhat.com> * testsuite/17_intro/names.cc: Check 'sz'. diff --git a/libstdc++-v3/configure b/libstdc++-v3/configure index 068a203..138b99f4 100755 --- a/libstdc++-v3/configure +++ b/libstdc++-v3/configure @@ -2701,6 +2701,52 @@ $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_cxx_check_type + +# ac_fn_c_check_decl LINENO SYMBOL VAR INCLUDES +# --------------------------------------------- +# Tests whether SYMBOL is declared in INCLUDES, setting cache variable VAR +# accordingly. 
+ac_fn_c_check_decl () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + as_decl_name=`echo $2|sed 's/ *(.*//'` + as_decl_use=`echo $2|sed -e 's/(/((/' -e 's/)/) 0&/' -e 's/,/) 0& (/g'` + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $as_decl_name is declared" >&5 +$as_echo_n "checking whether $as_decl_name is declared... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +int +main () +{ +#ifndef $as_decl_name +#ifdef __cplusplus + (void) $as_decl_use; +#else + (void) $as_decl_name; +#endif +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + eval "$3=yes" +else + eval "$3=no" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_c_check_decl cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. @@ -12130,7 +12176,7 @@ else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext <<_LT_EOF -#line 12133 "configure" +#line 12179 "configure" #include "confdefs.h" #if HAVE_DLFCN_H @@ -12236,7 +12282,7 @@ else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext <<_LT_EOF -#line 12239 "configure" +#line 12285 "configure" #include "confdefs.h" #if HAVE_DLFCN_H @@ -15918,7 +15964,7 @@ $as_echo "$glibcxx_cv_atomic_long_long" >&6; } # Fake what AC_TRY_COMPILE does. cat > conftest.$ac_ext << EOF -#line 15921 "configure" +#line 15967 "configure" int main() { typedef bool atomic_type; @@ -15953,7 +15999,7 @@ $as_echo "$glibcxx_cv_atomic_bool" >&6; } rm -f conftest* cat > conftest.$ac_ext << EOF -#line 15956 "configure" +#line 16002 "configure" int main() { typedef short atomic_type; @@ -15988,7 +16034,7 @@ $as_echo "$glibcxx_cv_atomic_short" >&6; } rm -f conftest* cat > conftest.$ac_ext << EOF -#line 15991 "configure" +#line 16037 "configure" int main() { // NB: _Atomic_word not necessarily int. @@ -16024,7 +16070,7 @@ $as_echo "$glibcxx_cv_atomic_int" >&6; } rm -f conftest* cat > conftest.$ac_ext << EOF -#line 16027 "configure" +#line 16073 "configure" int main() { typedef long long atomic_type; @@ -16177,7 +16223,7 @@ $as_echo "mutex" >&6; } # unnecessary for this test. cat > conftest.$ac_ext << EOF -#line 16180 "configure" +#line 16226 "configure" int main() { _Decimal32 d1; @@ -16219,7 +16265,7 @@ ac_compiler_gnu=$ac_cv_cxx_compiler_gnu # unnecessary for this test. 
cat > conftest.$ac_ext << EOF -#line 16222 "configure" +#line 16268 "configure" template<typename T1, typename T2> struct same { typedef T2 type; }; @@ -76614,6 +76660,26 @@ fi done +ac_fn_c_check_decl "$LINENO" "F_GETFL" "ac_cv_have_decl_F_GETFL" "fcntl.h +" +if test "x$ac_cv_have_decl_F_GETFL" = xyes; then : + +fi + +ac_fn_c_check_decl "$LINENO" "F_SETFL" "ac_cv_have_decl_F_SETFL" "fcntl.h +" +if test "x$ac_cv_have_decl_F_SETFL" = xyes; then : + +fi + +if test "$ac_cv_have_decl_F_GETFL$ac_cv_have_decl_F_SETFL" = 11 ; then + ac_fn_c_check_decl "$LINENO" "O_NONBLOCK" "ac_cv_have_decl_O_NONBLOCK" "fcntl.h +" +if test "x$ac_cv_have_decl_O_NONBLOCK" = xyes; then : + +fi + +fi # For Transactional Memory TS diff --git a/libstdc++-v3/configure.ac b/libstdc++-v3/configure.ac index 9d70ae7..d29efa6 100644 --- a/libstdc++-v3/configure.ac +++ b/libstdc++-v3/configure.ac @@ -481,6 +481,11 @@ GLIBCXX_CHECK_FILESYSTEM_DEPS # For Networking TS. AC_CHECK_HEADERS([fcntl.h sys/ioctl.h sys/socket.h sys/uio.h poll.h netdb.h arpa/inet.h netinet/in.h netinet/tcp.h]) +AC_CHECK_DECL(F_GETFL,[],[],[fcntl.h]) +AC_CHECK_DECL(F_SETFL,[],[],[fcntl.h]) +if test "$ac_cv_have_decl_F_GETFL$ac_cv_have_decl_F_SETFL" = 11; then + AC_CHECK_DECL(O_NONBLOCK,[],[],[fcntl.h]) +fi # For Transactional Memory TS GLIBCXX_CHECK_SIZE_T_MANGLING diff --git a/libstdc++-v3/include/bits/std_function.h b/libstdc++-v3/include/bits/std_function.h index fb86ff1..82c932e 100644 --- a/libstdc++-v3/include/bits/std_function.h +++ b/libstdc++-v3/include/bits/std_function.h @@ -127,7 +127,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION && __alignof__(_Functor) <= _M_max_align && (_M_max_align % __alignof__(_Functor) == 0)); - typedef integral_constant<bool, __stored_locally> _Local_storage; + using _Local_storage = integral_constant<bool, __stored_locally>; // Retrieve a pointer to the function object static _Functor* @@ -142,32 +142,33 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION return __source._M_access<_Functor*>(); } - // Clone a location-invariant function object that fits within + private: + // Construct a location-invariant function object that fits within // an _Any_data structure. - static void - _M_clone(_Any_data& __dest, const _Any_data& __source, true_type) - { - ::new (__dest._M_access()) _Functor(__source._M_access<_Functor>()); - } + template<typename _Fn> + static void + _M_create(_Any_data& __dest, _Fn&& __f, true_type) + { + ::new (__dest._M_access()) _Functor(std::forward<_Fn>(__f)); + } - // Clone a function object that is not location-invariant or - // that cannot fit into an _Any_data structure. - static void - _M_clone(_Any_data& __dest, const _Any_data& __source, false_type) - { - __dest._M_access<_Functor*>() = - new _Functor(*__source._M_access<const _Functor*>()); - } + // Construct a function object on the heap and store a pointer. + template<typename _Fn> + static void + _M_create(_Any_data& __dest, _Fn&& __f, false_type) + { + __dest._M_access<_Functor*>() + = new _Functor(std::forward<_Fn>(__f)); + } - // Destroying a location-invariant object may still require - // destruction. + // Destroy an object stored in the internal buffer. static void _M_destroy(_Any_data& __victim, true_type) { __victim._M_access<_Functor>().~_Functor(); } - // Destroying an object located on the heap. + // Destroy an object located on the heap. 
static void _M_destroy(_Any_data& __victim, false_type) { @@ -188,12 +189,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION __dest._M_access<const type_info*>() = nullptr; #endif break; + case __get_functor_ptr: __dest._M_access<_Functor*>() = _M_get_pointer(__source); break; case __clone_functor: - _M_clone(__dest, __source, _Local_storage()); + _M_init_functor(__dest, + *const_cast<const _Functor*>(_M_get_pointer(__source))); break; case __destroy_functor: @@ -203,9 +206,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION return false; } - static void - _M_init_functor(_Any_data& __functor, _Functor&& __f) - { _M_init_functor(__functor, std::move(__f), _Local_storage()); } + template<typename _Fn> + static void + _M_init_functor(_Any_data& __functor, _Fn&& __f) + noexcept(__and_<_Local_storage, + is_nothrow_constructible<_Functor, _Fn>>::value) + { + _M_create(__functor, std::forward<_Fn>(__f), _Local_storage()); + } template<typename _Signature> static bool @@ -226,15 +234,6 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION static bool _M_not_empty_function(const _Tp&) { return true; } - - private: - static void - _M_init_functor(_Any_data& __functor, _Functor&& __f, true_type) - { ::new (__functor._M_access()) _Functor(std::move(__f)); } - - static void - _M_init_functor(_Any_data& __functor, _Functor&& __f, false_type) - { __functor._M_access<_Functor*>() = new _Functor(std::move(__f)); } }; _Function_base() = default; @@ -291,6 +290,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION return std::__invoke_r<_Res>(*_Base::_M_get_pointer(__functor), std::forward<_ArgTypes>(__args)...); } + + template<typename _Fn> + static constexpr bool + _S_nothrow_init() noexcept + { + return __and_<typename _Base::_Local_storage, + is_nothrow_constructible<_Functor, _Fn>>::value; + } }; // Specialization for invalid types @@ -329,19 +336,26 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION : public _Maybe_unary_or_binary_function<_Res, _ArgTypes...>, private _Function_base { + // Equivalent to std::decay_t except that it produces an invalid type + // if the decayed type is the current specialization of std::function. template<typename _Func, - typename _Res2 = __invoke_result<_Func&, _ArgTypes...>> + bool _Self = is_same<__remove_cvref_t<_Func>, function>::value> + using _Decay_t + = typename __enable_if_t<!_Self, decay<_Func>>::type; + + template<typename _Func, + typename _DFunc = _Decay_t<_Func>, + typename _Res2 = __invoke_result<_DFunc&, _ArgTypes...>> struct _Callable : __is_invocable_impl<_Res2, _Res>::type { }; - // Used so the return type convertibility checks aren't done when - // performing overload resolution for copy construction/assignment. - template<typename _Tp> - struct _Callable<function, _Tp> : false_type { }; + template<typename _Cond, typename _Tp = void> + using _Requires = __enable_if_t<_Cond::value, _Tp>; - template<typename _Cond, typename _Tp> - using _Requires = typename enable_if<_Cond::value, _Tp>::type; + template<typename _Functor> + using _Handler + = _Function_handler<_Res(_ArgTypes...), __decay_t<_Functor>>; public: typedef _Res result_type; @@ -416,33 +430,42 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION * If @a __f is a non-NULL function pointer or an object of type @c * reference_wrapper<F>, this function will not throw. */ + // _GLIBCXX_RESOLVE_LIB_DEFECTS + // 2774. 
std::function construction vs assignment template<typename _Functor, - typename = _Requires<__not_<is_same<_Functor, function>>, void>, - typename = _Requires<_Callable<_Functor>, void>> - function(_Functor __f) + typename = _Requires<_Callable<_Functor>>> + function(_Functor&& __f) + noexcept(_Handler<_Functor>::template _S_nothrow_init<_Functor>()) : _Function_base() { - using _My_handler = _Function_handler<_Res(_ArgTypes...), _Functor>; + static_assert(is_copy_constructible<__decay_t<_Functor>>::value, + "std::function target must be copy-constructible"); + static_assert(is_constructible<__decay_t<_Functor>, _Functor>::value, + "std::function target must be constructible from the " + "constructor argument"); + + using _My_handler = _Handler<_Functor>; if (_My_handler::_M_not_empty_function(__f)) { - _My_handler::_M_init_functor(_M_functor, std::move(__f)); + _My_handler::_M_init_functor(_M_functor, + std::forward<_Functor>(__f)); _M_invoker = &_My_handler::_M_invoke; _M_manager = &_My_handler::_M_manager; } } /** - * @brief %Function assignment operator. + * @brief Function assignment operator. * @param __x A %function with identical call signature. - * @post @c (bool)*this == (bool)x - * @returns @c *this + * @post `(bool)*this == (bool)x` + * @returns `*this` * - * The target of @a __x is copied to @c *this. If @a __x has no - * target, then @c *this will be empty. + * The target of `__x` is copied to `*this`. If `__x` has no + * target, then `*this` will be empty. * - * If @a __x targets a function pointer or a reference to a function - * object, then this operation will not throw an %exception. + * If `__x` targets a function pointer or a reference to a function + * object, then this operation will not throw an exception. */ function& operator=(const function& __x) @@ -452,15 +475,15 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION } /** - * @brief %Function move-assignment operator. + * @brief Function move-assignment operator. * @param __x A %function rvalue with identical call signature. - * @returns @c *this + * @returns `*this` * - * The target of @a __x is moved to @c *this. If @a __x has no - * target, then @c *this will be empty. + * The target of `__x` is moved to `*this`. If `__x` has no + * target, then `*this` will be empty. * - * If @a __x targets a function pointer or a reference to a function - * object, then this operation will not throw an %exception. + * If `__x` targets a function pointer or a reference to a function + * object, then this operation will not throw an exception. */ function& operator=(function&& __x) noexcept @@ -470,11 +493,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION } /** - * @brief %Function assignment to zero. - * @post @c !(bool)*this - * @returns @c *this + * @brief Function assignment to empty. + * @post `!(bool)*this` + * @returns `*this` * - * The target of @c *this is deallocated, leaving it empty. + * The target of `*this` is deallocated, leaving it empty. */ function& operator=(nullptr_t) noexcept @@ -489,24 +512,26 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION } /** - * @brief %Function assignment to a new target. - * @param __f A %function object that is callable with parameters of - * type @c T1, @c T2, ..., @c TN and returns a value convertible - * to @c Res. - * @return @c *this + * @brief Function assignment to a new target. + * @param __f A function object that is callable with parameters of + * type `_ArgTypes...` and returns a value convertible + * to `_Res`. 
+ * @return `*this` + * @since C++11 * - * This %function object wrapper will target a copy of @a - * __f. If @a __f is @c reference_wrapper<F>, then this function - * object will contain a reference to the function object @c - * __f.get(). If @a __f is a NULL function pointer or NULL - * pointer-to-member, @c this object will be empty. + * This function object wrapper will target a copy of `__f`. If `__f` + * is `reference_wrapper<F>`, then this function object will contain + * a reference to the function object `__f.get()`. If `__f` is a null + * function pointer or null pointer-to-member, this object will be + * empty. * - * If @a __f is a non-NULL function pointer or an object of type @c - * reference_wrapper<F>, this function will not throw. + * If `__f` is a non-null function pointer or an object of type + * `reference_wrapper<F>`, this function will not throw. */ template<typename _Functor> - _Requires<_Callable<typename decay<_Functor>::type>, function&> + _Requires<_Callable<_Functor>, function&> operator=(_Functor&& __f) + noexcept(_Handler<_Functor>::template _S_nothrow_init<_Functor>()) { function(std::forward<_Functor>(__f)).swap(*this); return *this; @@ -527,8 +552,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION * @brief Swap the targets of two %function objects. * @param __x A %function with identical call signature. * - * Swap the targets of @c this function object and @a __f. This - * function will not throw an %exception. + * Swap the targets of `this` function object and `__f`. + * This function will not throw exceptions. */ void swap(function& __x) noexcept { @@ -542,10 +567,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION /** * @brief Determine if the %function wrapper has a target. * - * @return @c true when this %function object contains a target, - * or @c false when it is empty. + * @return `true` when this function object contains a target, + * or `false` when it is empty. * - * This function will not throw an %exception. + * This function will not throw exceptions. */ explicit operator bool() const noexcept { return !_M_empty(); } @@ -553,12 +578,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION // [3.7.2.4] function invocation /** - * @brief Invokes the function targeted by @c *this. + * @brief Invokes the function targeted by `*this`. * @returns the result of the target. - * @throws bad_function_call when @c !(bool)*this + * @throws `bad_function_call` when `!(bool)*this` * * The function call operator invokes the target function object - * stored by @c this. + * stored by `this`. */ _Res operator()(_ArgTypes... __args) const @@ -575,9 +600,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION * wrapper. * * @returns the type identifier of the target function object, or - * @c typeid(void) if @c !(bool)*this. + * `typeid(void)` if `!(bool)*this`. * - * This function will not throw an %exception. + * This function will not throw exceptions. */ const type_info& target_type() const noexcept @@ -597,7 +622,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION * @brief Access the stored target function object. * * @return Returns a pointer to the stored target function object, - * if @c typeid(_Functor).equals(target_type()); otherwise, a null + * if `typeid(_Functor).equals(target_type())`; otherwise, a null * pointer. * * This function does not throw exceptions. @@ -685,11 +710,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION // [20.7.15.2.6] null pointer comparisons /** - * @brief Compares a polymorphic function object wrapper against 0 - * (the NULL pointer). 
- * @returns @c true if the wrapper has no target, @c false otherwise + * @brief Test whether a polymorphic function object wrapper is empty. + * @returns `true` if the wrapper has no target, `false` otherwise * - * This function will not throw an %exception. + * This function will not throw exceptions. */ template<typename _Res, typename... _Args> inline bool @@ -704,11 +728,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION { return !static_cast<bool>(__f); } /** - * @brief Compares a polymorphic function object wrapper against 0 - * (the NULL pointer). - * @returns @c false if the wrapper has no target, @c true otherwise + * @brief Test whether a polymorphic function object wrapper is non-empty. + * @returns `false` if the wrapper has no target, `true` otherwise * - * This function will not throw an %exception. + * This function will not throw exceptions. */ template<typename _Res, typename... _Args> inline bool @@ -727,7 +750,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION /** * @brief Swap the targets of two polymorphic function object wrappers. * - * This function will not throw an %exception. + * This function will not throw exceptions. */ // _GLIBCXX_RESOLVE_LIB_DEFECTS // 2062. Effect contradictions w/o no-throw guarantee of std::function swaps diff --git a/libstdc++-v3/include/bits/unique_ptr.h b/libstdc++-v3/include/bits/unique_ptr.h index f34ca10..62ec1b5 100644 --- a/libstdc++-v3/include/bits/unique_ptr.h +++ b/libstdc++-v3/include/bits/unique_ptr.h @@ -1027,8 +1027,8 @@ namespace __detail */ template<typename _Tp> inline __detail::__unique_ptr_array_t<_Tp> - make_unique_for_overwrite(size_t __n) - { return unique_ptr<_Tp>(new remove_extent_t<_Tp>[__n]); } + make_unique_for_overwrite(size_t __num) + { return unique_ptr<_Tp>(new remove_extent_t<_Tp>[__num]); } /** Disable std::make_unique_for_overwrite for arrays of known bound. * @tparam _Tp An array type of known bound, such as `U[N]`. diff --git a/libstdc++-v3/include/experimental/internet b/libstdc++-v3/include/experimental/internet index f6d6ef3..6ce070a 100644 --- a/libstdc++-v3/include/experimental/internet +++ b/libstdc++-v3/include/experimental/internet @@ -61,6 +61,10 @@ # include <netdb.h> // getaddrinfo etc. #endif +#if defined _WIN32 && __has_include(<ws2tcpip.h>) +# include <ws2tcpip.h> +#endif + namespace std _GLIBCXX_VISIBILITY(default) { _GLIBCXX_BEGIN_NAMESPACE_VERSION @@ -263,7 +267,11 @@ namespace ip _S_ntoh_32(uint32_t __n) { return __builtin_bswap32(__n); } #endif +#ifdef _GLIBCXX_HAVE_ARPA_INET_H in_addr_t _M_addr; // network byte order +#else + uint32_t _M_addr; +#endif }; /// An IPv6 address. 
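The address_v4 hunk above falls back from in_addr_t to a plain uint32_t when <arpa/inet.h> is unavailable, and pulls in <ws2tcpip.h> only when building for Windows with that header present. A minimal standalone sketch of the same detection pattern follows; the HAVE_ARPA_INET_H and addr_storage_t names are invented here for illustration (libstdc++ itself uses the configure-generated _GLIBCXX_HAVE_ARPA_INET_H):

#include <cstdint>

#if defined _WIN32 && __has_include(<ws2tcpip.h>)
# include <ws2tcpip.h>       // Winsock TCP/IP definitions
#elif __has_include(<arpa/inet.h>)
# include <arpa/inet.h>      // POSIX header that declares in_addr_t
# define HAVE_ARPA_INET_H 1  // hypothetical stand-in for the configure macro
#endif

#ifdef HAVE_ARPA_INET_H
using addr_storage_t = in_addr_t;      // the platform's own address type
#else
using addr_storage_t = std::uint32_t;  // fallback: an IPv4 address is 32 bits
#endif

Relying on __has_include (standard since C++17, and a long-standing GCC extension before that) keeps the fallback purely compile-time, so the stored representation stays a 32-bit network-byte-order value on every platform.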
@@ -705,7 +713,7 @@ namespace ip inline address_v4 make_address_v4(string_view __str, error_code& __ec) noexcept { - char __buf[INET_ADDRSTRLEN]; + char __buf[16]; // INET_ADDRSTRLEN isn't defined on Windows auto __len = __str.copy(__buf, sizeof(__buf)); if (__len == sizeof(__buf)) { @@ -1686,9 +1694,15 @@ namespace ip #ifdef AI_NUMERICSERV static constexpr flags numeric_service = (flags)AI_NUMERICSERV; #endif +#ifdef AI_V4MAPPED static constexpr flags v4_mapped = (flags)AI_V4MAPPED; +#endif +#ifdef AI_ALL static constexpr flags all_matching = (flags)AI_ALL; +#endif +#ifdef AI_ADDRCONFIG static constexpr flags address_configured = (flags)AI_ADDRCONFIG; +#endif friend constexpr flags operator&(flags __f1, flags __f2) noexcept diff --git a/libstdc++-v3/include/experimental/socket b/libstdc++-v3/include/experimental/socket index 6d1c114..9424164 100644 --- a/libstdc++-v3/include/experimental/socket +++ b/libstdc++-v3/include/experimental/socket @@ -293,11 +293,14 @@ inline namespace v1 static const int _S_level = SOL_SOCKET; static const int _S_name = SO_SNDLOWAT; }; +#endif // HAVE_SYS_SOCKET_H enum shutdown_type : int { }; +#if defined SHUT_RD && defined SHUT_WR && defined SHUT_RDWR static constexpr shutdown_type shutdown_receive = (shutdown_type)SHUT_RD; static constexpr shutdown_type shutdown_send = (shutdown_type)SHUT_WR; static constexpr shutdown_type shutdown_both = (shutdown_type)SHUT_RDWR; +#endif enum wait_type : int { }; #ifdef _GLIBCXX_HAVE_POLL_H @@ -311,14 +314,20 @@ inline namespace v1 #endif enum message_flags : int { }; +#if defined MSG_PEEK && defined MSG_OOB && defined MSG_DONTROUTE static constexpr message_flags message_peek = (message_flags)MSG_PEEK; static constexpr message_flags message_out_of_band = (message_flags)MSG_OOB; static constexpr message_flags message_do_not_route = (message_flags)MSG_DONTROUTE; +#endif - static const int max_listen_connections = SOMAXCONN; +#ifdef SOMAXCONN + static constexpr int max_listen_connections = SOMAXCONN; +#else + static constexpr int max_listen_connections = 4; +#endif // message_flags bitmask operations are defined as hidden friends. 
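The comment closing that hunk names the design choice: the bitmask operators for these enums are hidden friends, defined inside socket_base so they are found only by argument-dependent lookup and never join the enclosing namespace's overload set. A minimal sketch of the pattern, using a stand-in class rather than the library's actual definition:

struct sock_base_sketch          // stand-in, not the real socket_base
{
  enum message_flags : int { };  // opaque bitmask enum, as in the TS

  // Hidden friends: found via ADL on message_flags arguments only.
  friend constexpr message_flags
  operator|(message_flags f1, message_flags f2) noexcept
  { return message_flags(int(f1) | int(f2)); }

  friend constexpr message_flags&
  operator|=(message_flags& f1, message_flags f2) noexcept
  { return f1 = (f1 | f2); }
};

// The enclosing class is an associated class of its member enum, so the
// operators are visible here without any namespace-scope declaration:
constexpr auto f = sock_base_sketch::message_flags(1)
                     | sock_base_sketch::message_flags(2);
static_assert(int(f) == 3, "bitwise-or combines the flag bits");

Because nothing is declared at namespace scope, unrelated code that merely includes the header cannot accidentally match these operators during overload resolution.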
@@ -350,6 +359,7 @@ inline namespace v1 operator^=(message_flags& __f1, message_flags __f2) noexcept { return __f1 = (__f1 ^ __f2); } +#ifdef _GLIBCXX_HAVE_SYS_SOCKET_H protected: struct __msg_hdr : ::msghdr { @@ -483,7 +493,7 @@ inline namespace v1 void native_non_blocking(bool __mode, error_code& __ec) { -#ifdef _GLIBCXX_HAVE_FCNTL_H +#if defined _GLIBCXX_HAVE_FCNTL_H && defined _GLIBCXX_HAVE_DECL_O_NONBLOCK int __flags = ::fcntl(_M_sockfd, F_GETFL, 0); if (__flags >= 0) { @@ -508,7 +518,7 @@ inline namespace v1 bool native_non_blocking() const { -#ifdef _GLIBCXX_HAVE_FCNTL_H +#if defined _GLIBCXX_HAVE_FCNTL_H && defined _GLIBCXX_HAVE_DECL_O_NONBLOCK if (_M_bits.native_non_blocking == -1) { const int __flags = ::fcntl(_M_sockfd, F_GETFL, 0); @@ -714,7 +724,9 @@ inline namespace v1 { error_code __ec; cancel(__ec); +#ifdef _GLIBCXX_HAVE_SYS_SOCKET_H set_option(socket_base::linger{false, chrono::seconds{}}, __ec); +#endif ::close(_M_sockfd); } } @@ -1892,11 +1904,13 @@ inline namespace v1 { open(__protocol); } basic_socket_acceptor(io_context& __ctx, const endpoint_type& __endpoint, - bool __reuse_addr = true) + [[__maybe_unused__]] bool __reuse_addr = true) : basic_socket_acceptor(__ctx, __endpoint.protocol()) { +#ifdef _GLIBCXX_HAVE_SYS_SOCKET_H if (__reuse_addr) set_option(reuse_address(true)); +#endif bind(__endpoint); listen(); } diff --git a/libstdc++-v3/testsuite/20_util/function/cons/lwg2774.cc b/libstdc++-v3/testsuite/20_util/function/cons/lwg2774.cc new file mode 100644 index 0000000..a606104 --- /dev/null +++ b/libstdc++-v3/testsuite/20_util/function/cons/lwg2774.cc @@ -0,0 +1,31 @@ +// { dg-do run { target c++11 } } +#include <functional> +#include <testsuite_hooks.h> + +struct Funk +{ + Funk() = default; + Funk(const Funk&) { ++copies; } + Funk(Funk&&) { ++moves; } + + void operator()() const { } + + static int copies; + static int moves; +}; + +int Funk::copies = 0; +int Funk::moves = 0; + +int main() +{ + Funk e; + // LWG 2774 means there should be no move here: + std::function<void()> fc(e); + VERIFY(Funk::copies == 1); + VERIFY(Funk::moves == 0); + // And only one move here: + std::function<void()> fm(std::move(e)); + VERIFY(Funk::copies == 1); + VERIFY(Funk::moves == 1); +} diff --git a/libstdc++-v3/testsuite/20_util/function/cons/noexcept.cc b/libstdc++-v3/testsuite/20_util/function/cons/noexcept.cc new file mode 100644 index 0000000..6357198 --- /dev/null +++ b/libstdc++-v3/testsuite/20_util/function/cons/noexcept.cc @@ -0,0 +1,37 @@ +// { dg-do compile { target c++11 } } +#include <functional> + +struct X +{ + void operator()(X*); + + char bigness[100]; +}; + +using F = std::function<void(X*)>; + +static_assert( std::is_nothrow_constructible<F>::value, "" ); +static_assert( std::is_nothrow_constructible<F, F>::value, "" ); +static_assert( ! std::is_nothrow_constructible<F, F&>::value, "" ); +static_assert( ! std::is_nothrow_constructible<F, const F&>::value, "" ); +static_assert( std::is_nothrow_constructible<F, std::nullptr_t>::value, "" ); + +static_assert( ! std::is_nothrow_constructible<F, X>::value, "" ); +using R = std::reference_wrapper<X>; +static_assert( std::is_nothrow_constructible<F, R>::value, "" ); + + +// The standard requires that construction from a function pointer type +// does not throw, but doesn't require that the construction is noexcept. +// Strengthening that noexcept for these types is a GCC extension. 
+static_assert( std::is_nothrow_constructible<F, void(*)(X*)>::value, "" ); +// This is a GCC extension, not required by the standard: +static_assert( std::is_nothrow_constructible<F, void(&)(X*)>::value, "" ); +// This is a GCC extension, not required by the standard: +static_assert( std::is_nothrow_constructible<F, void(X::*)()>::value, "" ); + +auto c = [](X*){}; +static_assert( std::is_nothrow_constructible<F, decltype(+c)>::value, "" ); +// The standard allows this to throw, but as a GCC extension we store +// closures with no captures in the std::function, so this is noexcept too: +static_assert( std::is_nothrow_constructible<F, decltype(c)>::value, "" ); diff --git a/libstdc++-v3/testsuite/experimental/net/internet/resolver/base.cc b/libstdc++-v3/testsuite/experimental/net/internet/resolver/base.cc index 4c36b03..e2167b7 100644 --- a/libstdc++-v3/testsuite/experimental/net/internet/resolver/base.cc +++ b/libstdc++-v3/testsuite/experimental/net/internet/resolver/base.cc @@ -32,9 +32,15 @@ static_assert( __gnu_test::test_bitmask_values({ #ifdef AI_NUMERICSERV resolver_base::numeric_service, #endif +#ifdef AI_V4MAPPED resolver_base::v4_mapped, +#endif +#ifdef AI_ALL resolver_base::all_matching, +#endif +#ifdef AI_ADDRCONFIG resolver_base::address_configured +#endif }), "each bitmask element is distinct" ); static_assert( ! std::is_default_constructible<resolver_base>(), "protected" ); diff --git a/libstdc++-v3/testsuite/experimental/net/socket/basic_socket.cc b/libstdc++-v3/testsuite/experimental/net/socket/basic_socket.cc index 7fe3ec0..6cf11f6 100644 --- a/libstdc++-v3/testsuite/experimental/net/socket/basic_socket.cc +++ b/libstdc++-v3/testsuite/experimental/net/socket/basic_socket.cc @@ -22,10 +22,6 @@ namespace net = std::experimental::net; using namespace std; -namespace test -{ -} - void test01(net::io_context& io) { @@ -113,8 +109,10 @@ test01(net::io_context& io) s.bind(e); s.bind(e, ec); +#ifdef SHUT_RDWR s.shutdown(net::socket_base::shutdown_both); s.shutdown(net::socket_base::shutdown_both, ec); +#endif e = s.local_endpoint(); e = s.local_endpoint(ec); diff --git a/libstdc++-v3/testsuite/experimental/net/socket/socket_base.cc b/libstdc++-v3/testsuite/experimental/net/socket/socket_base.cc index f957b6c..67da9db 100644 --- a/libstdc++-v3/testsuite/experimental/net/socket/socket_base.cc +++ b/libstdc++-v3/testsuite/experimental/net/socket/socket_base.cc @@ -174,24 +174,28 @@ void test_option_types() void test_constants() { -#if __has_include(<sys/socket.h>) static_assert( is_enum<S::shutdown_type>::value, "" ); +#if __has_include(<sys/socket.h>) && defined SHUT_RDWR static_assert( S::shutdown_receive != S::shutdown_send, "" ); static_assert( S::shutdown_receive != S::shutdown_both, "" ); static_assert( S::shutdown_send != S::shutdown_both, "" ); +#endif static_assert( is_enum<S::wait_type>::value, "" ); +#if __has_include(<poll.h>) && defined POLLIN static_assert( S::wait_read != S::wait_write, ""); static_assert( S::wait_read != S::wait_error, ""); static_assert( S::wait_write != S::wait_error, ""); +#endif + static_assert( is_enum<S::message_flags>::value, "" ); +#if __has_include(<sys/socket.h>) && defined MSG_OOB static_assert( __gnu_test::test_bitmask_values( {S::message_peek, S::message_out_of_band, S::message_do_not_route} ), "each bitmask element is distinct" ); - - auto m = &S::max_listen_connections; - static_assert( is_same<decltype(m), const int*>::value, "" ); #endif + + static_assert( is_same<decltype(S::max_listen_connections), const int>::value, "" ); } int 
main()
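The native_non_blocking changes above compile the fcntl path only when configure found both <fcntl.h> and a declaration of O_NONBLOCK, which is exactly what the new F_GETFL/F_SETFL/O_NONBLOCK checks establish. On POSIX systems the guarded operation is a plain read-modify-write of the descriptor's status flags; the following is a hedged standalone sketch of that idiom, not the library's member function:

#include <fcntl.h>   // F_GETFL, F_SETFL, O_NONBLOCK (POSIX)

bool set_non_blocking(int fd, bool mode)
{
  int flags = ::fcntl(fd, F_GETFL, 0);  // read the current status flags
  if (flags < 0)
    return false;                       // fcntl failed; errno has details
  if (mode)
    flags |= O_NONBLOCK;                // request non-blocking I/O
  else
    flags &= ~O_NONBLOCK;               // restore blocking I/O
  return ::fcntl(fd, F_SETFL, flags) == 0;
}

On targets where the configure checks fail (for example Windows, where fcntl and O_NONBLOCK are absent), the library's guarded code instead reports an unsupported operation, which is why the checks gate the declaration rather than assume it.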