author     Martin Liska <mliska@suse.cz>  2021-12-22 12:03:07 +0100
committer  Martin Liska <mliska@suse.cz>  2021-12-22 12:03:07 +0100
commit     6a96a486f3f4fc538f8889bd68e85be3c9fdf28f (patch)
tree       09ce06bfe7899631c78fb7e8cbd2c0b20e5f28a9
parent     303634cc437784660b59f96b7ab9d4d52acf7dc3 (diff)
parent     1a6592ff65e443e66d943103d05701cafdda9149 (diff)
Merge branch 'master' into devel/sphinx
78 files changed, 3061 insertions(+), 1899 deletions(-)
@@ -1,3 +1,11 @@
+2021-12-21 Iain Buclaw <ibuclaw@gdcproject.org>
+
+	* configure: Regenerate.
+
+2021-12-21 Martin Liska <mliska@suse.cz>
+
+	* config.sub: Change mode back to 755.
+
 2021-12-17 Martin Liska <mliska@suse.cz>
 
 	* config.sub: Sync from master.
diff --git a/config.sub b/config.sub
index 38f3d03..38f3d03 100644..100755
--- a/config.sub
+++ b/config.sub
diff --git a/config/ChangeLog b/config/ChangeLog
index 5bc7097..066c01f 100644
--- a/config/ChangeLog
+++ b/config/ChangeLog
@@ -1,3 +1,8 @@
+2021-12-21 Iain Buclaw <ibuclaw@gdcproject.org>
+
+	PR d/103528
+	* acx.m4 (ACX_PROG_GDC): Add check whether D compiler works.
+
 2021-12-16 H.J. Lu <hjl.tools@gmail.com>
 
 	Revert:
diff --git a/config/acx.m4 b/config/acx.m4
index 3c65d82..b86c4f9 100644
--- a/config/acx.m4
+++ b/config/acx.m4
@@ -425,7 +425,20 @@ AC_DEFUN([ACX_PROG_GDC],
 [AC_REQUIRE([AC_CHECK_TOOL_PREFIX])
 AC_REQUIRE([AC_PROG_CC])
 AC_CHECK_TOOL(GDC, gdc, no)
+AC_CACHE_CHECK([whether the D compiler works],
+ acx_cv_d_compiler_works,
+[cat >conftest.d <<EOF
+module conftest; int main() { return 0; }
+EOF
+acx_cv_d_compiler_works=no
+if test "x$GDC" != xno; then
+  errors=`(${GDC} $1[]m4_ifval([$1], [ ])-c conftest.d) 2>&1 || echo failure`
+  if test x"$errors" = x && test -f conftest.$ac_objext; then
+    acx_cv_d_compiler_works=yes
+  fi
+  rm -f conftest.*
+fi])
+if test "x$GDC" != xno && test x$acx_cv_d_compiler_works != xno; then
   have_gdc=yes
 else
   have_gdc=no
@@ -5727,7 +5727,26 @@ else
   GDC="$ac_cv_prog_GDC"
 fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the D compiler works" >&5
+$as_echo_n "checking whether the D compiler works... " >&6; }
+if ${acx_cv_d_compiler_works+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat >conftest.d <<EOF
+module conftest; int main() { return 0; }
+EOF
+acx_cv_d_compiler_works=no
+if test "x$GDC" != xno; then
+  errors=`(${GDC} -c conftest.d) 2>&1 || echo failure`
+  if test x"$errors" = x && test -f conftest.$ac_objext; then
+    acx_cv_d_compiler_works=yes
+  fi
+  rm -f conftest.*
+fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $acx_cv_d_compiler_works" >&5
+$as_echo "$acx_cv_d_compiler_works" >&6; }
+if test "x$GDC" != xno && test x$acx_cv_d_compiler_works != xno; then
   have_gdc=yes
 else
   have_gdc=no
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 5ab864a..43d073e 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,86 @@
+2021-12-21 Jiang Haochen <haochen.jiang@intel.com>
+
+	* config/i386/bmiintrin.h (_tzcnt_u16): New intrinsic.
+	(_andn_u32): Ditto.
+	(_andn_u64): Ditto.
+
+2021-12-21 Xionghu Luo <luoxhu@linux.ibm.com>
+
+	* tree-ssa-loop-im.c (bb_colder_than_loop_preheader): New
+	function.
+	(get_coldest_out_loop): New function.
+	(determine_max_movement): Use get_coldest_out_loop.
+	(move_computations_worker): Adjust and fix iteration udpate.
+	(class ref_in_loop_hot_body): New functor.
+	(ref_in_loop_hot_body::operator): New.
+	(can_sm_ref_p): Use for_all_locs_in_loop.
+	(fill_coldest_and_hotter_out_loop): New.
+	(tree_ssa_lim_finalize): Free coldest_outermost_loop and
+	hotter_than_inner_loop.
+	(loop_invariant_motion_in_fun): Call fill_coldest_and_hotter_out_loop.
+
+2021-12-21 Xionghu Luo <luoxhu@linux.ibm.com>
+
+	* tree-ssa-loop-split.c (split_loop): Fix incorrect
+	profile_count and probability.
+	(do_split_loop_on_cond): Likewise.
+
+2021-12-21 Xionghu Luo <luoxhu@linux.ibm.com>
+
+	PR middle-end/103270
+	* predict.c (predict_extra_loop_exits): Add loop parameter.
+	(predict_loops): Call with loop argument.
+
+2021-12-21 Xionghu Luo <luoxhu@linux.ibm.com>
+
+	* config/rs6000/altivec.md (altivec_vaddu<VI_char>s): Replace
+	UNSPEC_VADDU with us_plus.
+	(altivec_vadds<VI_char>s): Replace UNSPEC_VADDS with ss_plus.
+	(altivec_vsubu<VI_char>s): Replace UNSPEC_VSUBU with us_minus.
+	(altivec_vsubs<VI_char>s): Replace UNSPEC_VSUBS with ss_minus.
+	(altivec_abss_<mode>): Likewise.
+
+2021-12-20 Uroš Bizjak <ubizjak@gmail.com>
+
+	PR target/103772
+	* config/i386/sse.md (<sse2p4_1>_pinsr<ssemodesuffix>): Add
+	earlyclobber to (x,x,x,i) alternative.
+	(<sse2p4_1>_pinsr<ssemodesuffix> peephole2): Remove.
+	(<sse2p4_1>_pinsr<ssemodesuffix> splitter): Use output
+	operand as a temporary register. Split after reload_completed.
+
+2021-12-20 Roger Sayle <roger@nextmovesoftware.com>
+	Uroš Bizjak <ubizjak@gmail.com>
+
+	* config/i386/i386.md (any_mul_highpart): New code iterator.
+	(sgnprefix, s): Add attribute support for [su]mul_highpart.
+	(<s>mul<mode>3_highpart): Delete expander.
+	(<s>mul<mode>3_highpart, <s>mulsi32_highpart_zext):
+	New define_insn patterns.
+	(define_peephole2): Tweak the register allocation for the above
+	instructions after reload.
+
+2021-12-20 Joel Sherrill <joel@rtems.org>
+
+	* config.gcc: Obsolete m32c-*-rtems* target.
+
+2021-12-20 Martin Liska <mliska@suse.cz>
+
+	* opts.c (default_options_optimization): Support -Oz in -Ox option hints.
+
+2021-12-20 Jan Hubicka <hubicka@ucw.cz>
+
+	PR ipa/103669
+	* ipa-modref.c (modref_eaf_analysis::analyze_ssa_name): Add deferred
+	parameter.
+	(modref_eaf_analysis::propagate): Use it.
+
+2021-12-20 liuhongt <hongtao.liu@intel.com>
+
+	PR target/98468
+	* config/i386/sse.md (*bit_and_float_vector_all_ones): New
+	pre-reload splitter.
+
 2021-12-19 Jan Hubicka <hubicka@ucw.cz>
 
 	PR ipa/103766
diff --git a/gcc/DATESTAMP b/gcc/DATESTAMP
index 40a9145..275f771 100644
--- a/gcc/DATESTAMP
+++ b/gcc/DATESTAMP
@@ -1 +1 @@
-20211220
+20211222
diff --git a/gcc/config.gcc b/gcc/config.gcc
index c8c0ef9..91806aa 100644
--- a/gcc/config.gcc
+++ b/gcc/config.gcc
@@ -252,6 +252,7 @@ case ${target} in
 	| cr16-*-* \
 	| hppa[12]*-*-hpux10* \
 	| hppa[12]*-*-hpux11* \
+	| m32c-*-rtems* \
 	)
 	if test "x$enable_obsolete" != xyes; then
 	  echo "*** Configuration ${target} is obsolete." >&2
diff --git a/gcc/config/i386/bmiintrin.h b/gcc/config/i386/bmiintrin.h
index 439d81c..92450a6 100644
--- a/gcc/config/i386/bmiintrin.h
+++ b/gcc/config/i386/bmiintrin.h
@@ -40,6 +40,12 @@ __tzcnt_u16 (unsigned short __X)
   return __builtin_ia32_tzcnt_u16 (__X);
 }
+extern __inline unsigned short __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_tzcnt_u16 (unsigned short __X)
+{
+  return __builtin_ia32_tzcnt_u16 (__X);
+}
+
 extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 __andn_u32 (unsigned int __X, unsigned int __Y)
 {
@@ -47,6 +53,12 @@ __andn_u32 (unsigned int __X, unsigned int __Y)
 }
 extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_andn_u32 (unsigned int __X, unsigned int __Y)
+{
+  return __andn_u32 (__X, __Y);
+}
+
+extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 __bextr_u32 (unsigned int __X, unsigned int __Y)
 {
   return __builtin_ia32_bextr_u32 (__X, __Y);
@@ -115,6 +127,12 @@ __andn_u64 (unsigned long long __X, unsigned long long __Y)
 }
 extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_andn_u64 (unsigned long long __X, unsigned long long __Y)
+{
+  return __andn_u64 (__X, __Y);
+}
+
+extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 __bextr_u64 (unsigned long long __X, unsigned long long __Y)
 {
   return __builtin_ia32_bextr_u64 (__X, __Y);
diff --git a/gcc/config/i386/i386.md b/gcc/config/i386/i386.md
index d25453f..58b1064 100644
--- a/gcc/config/i386/i386.md
+++ b/gcc/config/i386/i386.md
@@ -992,11 +992,16 @@
 ;; Mapping of extend operators
 (define_code_iterator any_extend [sign_extend zero_extend])
+;; Mapping of highpart multiply operators
+(define_code_iterator any_mul_highpart [smul_highpart umul_highpart])
+
 ;; Prefix for insn menmonic.
(define_code_attr sgnprefix [(sign_extend "i") (zero_extend "") + (smul_highpart "i") (umul_highpart "") (div "i") (udiv "")]) ;; Prefix for define_insn -(define_code_attr s [(sign_extend "s") (zero_extend "u")]) +(define_code_attr s [(sign_extend "s") (zero_extend "u") + (smul_highpart "s") (umul_highpart "u")]) (define_code_attr u [(sign_extend "") (zero_extend "u") (div "") (udiv "u")]) (define_code_attr u_bool [(sign_extend "false") (zero_extend "true") @@ -8433,20 +8438,45 @@ (set_attr "bdver1_decode" "direct") (set_attr "mode" "QI")]) -(define_expand "<s>mul<mode>3_highpart" - [(parallel [(set (match_operand:DWIH 0 "register_operand") - (truncate:DWIH - (lshiftrt:<DWI> - (mult:<DWI> - (any_extend:<DWI> - (match_operand:DWIH 1 "nonimmediate_operand")) - (any_extend:<DWI> - (match_operand:DWIH 2 "register_operand"))) - (match_dup 3)))) - (clobber (scratch:DWIH)) - (clobber (reg:CC FLAGS_REG))])] +;; Highpart multiplication patterns +(define_insn "<s>mul<mode>3_highpart" + [(set (match_operand:DWIH 0 "register_operand" "=d") + (any_mul_highpart:DWIH + (match_operand:DWIH 1 "register_operand" "%a") + (match_operand:DWIH 2 "nonimmediate_operand" "rm"))) + (clobber (match_scratch:DWIH 3 "=1")) + (clobber (reg:CC FLAGS_REG))] "" - "operands[3] = GEN_INT (GET_MODE_BITSIZE (<MODE>mode));") + "<sgnprefix>mul{<imodesuffix>}\t%2" + [(set_attr "type" "imul") + (set_attr "length_immediate" "0") + (set (attr "athlon_decode") + (if_then_else (eq_attr "cpu" "athlon") + (const_string "vector") + (const_string "double"))) + (set_attr "amdfam10_decode" "double") + (set_attr "bdver1_decode" "direct") + (set_attr "mode" "<MODE>")]) + +(define_insn "*<s>mulsi3_highpart_zext" + [(set (match_operand:DI 0 "register_operand" "=d") + (zero_extend:DI + (any_mul_highpart:SI + (match_operand:SI 1 "register_operand" "%a") + (match_operand:SI 2 "nonimmediate_operand" "rm")))) + (clobber (match_scratch:SI 3 "=1")) + (clobber (reg:CC FLAGS_REG))] + "TARGET_64BIT" + "<sgnprefix>mul{l}\t%2" + [(set_attr "type" "imul") + (set_attr "length_immediate" "0") + (set (attr "athlon_decode") + (if_then_else (eq_attr "cpu" "athlon") + (const_string "vector") + (const_string "double"))) + (set_attr "amdfam10_decode" "double") + (set_attr "bdver1_decode" "direct") + (set_attr "mode" "SI")]) (define_insn "*<s>muldi3_highpart_1" [(set (match_operand:DI 0 "register_operand" "=d") @@ -8467,8 +8497,8 @@ (set_attr "length_immediate" "0") (set (attr "athlon_decode") (if_then_else (eq_attr "cpu" "athlon") - (const_string "vector") - (const_string "double"))) + (const_string "vector") + (const_string "double"))) (set_attr "amdfam10_decode" "double") (set_attr "bdver1_decode" "direct") (set_attr "mode" "DI")]) @@ -8491,8 +8521,8 @@ (set_attr "length_immediate" "0") (set (attr "athlon_decode") (if_then_else (eq_attr "cpu" "athlon") - (const_string "vector") - (const_string "double"))) + (const_string "vector") + (const_string "double"))) (set_attr "amdfam10_decode" "double") (set_attr "bdver1_decode" "direct") (set_attr "mode" "SI")]) @@ -8515,12 +8545,54 @@ (set_attr "length_immediate" "0") (set (attr "athlon_decode") (if_then_else (eq_attr "cpu" "athlon") - (const_string "vector") - (const_string "double"))) + (const_string "vector") + (const_string "double"))) (set_attr "amdfam10_decode" "double") (set_attr "bdver1_decode" "direct") (set_attr "mode" "SI")]) +;; Highpart multiplication peephole2s to tweak register allocation. 
+;; mov %rdx,imm; mov %rax,%rdi; imulq %rdx -> mov %rax,imm; imulq %rdi +(define_peephole2 + [(set (match_operand:SWI48 0 "general_reg_operand") + (match_operand:SWI48 1 "immediate_operand")) + (set (match_operand:SWI48 2 "general_reg_operand") + (match_operand:SWI48 3 "general_reg_operand")) + (parallel [(set (match_operand:SWI48 4 "general_reg_operand") + (any_mul_highpart:SWI48 (match_dup 2) (match_dup 0))) + (clobber (match_dup 2)) + (clobber (reg:CC FLAGS_REG))])] + "REGNO (operands[0]) != REGNO (operands[2]) + && REGNO (operands[0]) != REGNO (operands[3]) + && (REGNO (operands[0]) == REGNO (operands[4]) + || peep2_reg_dead_p (3, operands[0]))" + [(set (match_dup 2) (match_dup 1)) + (parallel [(set (match_dup 4) + (any_mul_highpart:SWI48 (match_dup 2) (match_dup 3))) + (clobber (match_dup 2)) + (clobber (reg:CC FLAGS_REG))])]) + +(define_peephole2 + [(set (match_operand:SI 0 "general_reg_operand") + (match_operand:SI 1 "immediate_operand")) + (set (match_operand:SI 2 "general_reg_operand") + (match_operand:SI 3 "general_reg_operand")) + (parallel [(set (match_operand:DI 4 "general_reg_operand") + (zero_extend:DI + (any_mul_highpart:SI (match_dup 2) (match_dup 0)))) + (clobber (match_dup 2)) + (clobber (reg:CC FLAGS_REG))])] + "REGNO (operands[0]) != REGNO (operands[2]) + && REGNO (operands[0]) != REGNO (operands[3]) + && (REGNO (operands[0]) == REGNO (operands[4]) + || peep2_reg_dead_p (3, operands[0]))" + [(set (match_dup 2) (match_dup 1)) + (parallel [(set (match_dup 4) + (zero_extend:DI + (any_mul_highpart:SI (match_dup 2) (match_dup 3)))) + (clobber (match_dup 2)) + (clobber (reg:CC FLAGS_REG))])]) + ;; The patterns that match these are at the end of this file. (define_expand "mulxf3" diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md index 5196149..cb1c0b1 100644 --- a/gcc/config/i386/sse.md +++ b/gcc/config/i386/sse.md @@ -17430,7 +17430,7 @@ ;; sse4_1_pinsrd must come before sse2_loadld since it is preferred. (define_insn "<sse2p4_1>_pinsr<ssemodesuffix>" - [(set (match_operand:PINSR_MODE 0 "register_operand" "=x,x,x,x,v,v,x") + [(set (match_operand:PINSR_MODE 0 "register_operand" "=x,x,x,x,v,v,&x") (vec_merge:PINSR_MODE (vec_duplicate:PINSR_MODE (match_operand:<ssescalarmode> 2 "nonimmediate_operand" "r,m,r,m,r,m,x")) @@ -17499,25 +17499,6 @@ (const_string "*")))]) ;; For TARGET_AVX2, implement insert from XMM reg with PBROADCASTW + PBLENDW. -;; First try to get a scratch register and go through it. In case this fails, -;; overwrite source reg with broadcasted value and blend from there. 
-(define_peephole2 - [(match_scratch:V8_128 4 "x") - (set (match_operand:V8_128 0 "sse_reg_operand") - (vec_merge:V8_128 - (vec_duplicate:V8_128 - (match_operand:<ssescalarmode> 2 "sse_reg_operand")) - (match_operand:V8_128 1 "sse_reg_operand") - (match_operand:SI 3 "const_int_operand")))] - "TARGET_AVX2 - && INTVAL (operands[3]) > 1 - && ((unsigned) exact_log2 (INTVAL (operands[3])) - < GET_MODE_NUNITS (<MODE>mode))" - [(set (match_dup 4) - (vec_duplicate:V8_128 (match_dup 2))) - (set (match_dup 0) - (vec_merge:V8_128 (match_dup 4) (match_dup 1) (match_dup 3)))]) - (define_split [(set (match_operand:V8_128 0 "sse_reg_operand") (vec_merge:V8_128 @@ -17525,18 +17506,14 @@ (match_operand:<ssescalarmode> 2 "sse_reg_operand")) (match_operand:V8_128 1 "sse_reg_operand") (match_operand:SI 3 "const_int_operand")))] - "TARGET_AVX2 && epilogue_completed + "TARGET_AVX2 && reload_completed && INTVAL (operands[3]) > 1 && ((unsigned) exact_log2 (INTVAL (operands[3])) < GET_MODE_NUNITS (<MODE>mode))" - [(set (match_dup 4) + [(set (match_dup 0) (vec_duplicate:V8_128 (match_dup 2))) (set (match_dup 0) - (vec_merge:V8_128 (match_dup 4) (match_dup 1) (match_dup 3)))] -{ - operands[4] = lowpart_subreg (<MODE>mode, operands[2], - <ssescalarmode>mode); -}) + (vec_merge:V8_128 (match_dup 0) (match_dup 1) (match_dup 3)))]) (define_expand "<extract_type>_vinsert<shuffletype><extract_suf>_mask" [(match_operand:AVX512_VEC 0 "register_operand") diff --git a/gcc/config/rs6000/altivec.md b/gcc/config/rs6000/altivec.md index ef43211..c2312cc 100644 --- a/gcc/config/rs6000/altivec.md +++ b/gcc/config/rs6000/altivec.md @@ -29,8 +29,6 @@ UNSPEC_VMHADDSHS UNSPEC_VMHRADDSHS UNSPEC_VADDCUW - UNSPEC_VADDU - UNSPEC_VADDS UNSPEC_VAVGU UNSPEC_VAVGS UNSPEC_VMULEUB @@ -61,8 +59,6 @@ UNSPEC_VSR UNSPEC_VSRO UNSPEC_VSUBCUW - UNSPEC_VSUBU - UNSPEC_VSUBS UNSPEC_VSUM4UBS UNSPEC_VSUM4S UNSPEC_VSUM2SWS @@ -517,9 +513,8 @@ (define_insn "altivec_vaddu<VI_char>s" [(set (match_operand:VI 0 "register_operand" "=v") - (unspec:VI [(match_operand:VI 1 "register_operand" "v") - (match_operand:VI 2 "register_operand" "v")] - UNSPEC_VADDU)) + (us_plus:VI (match_operand:VI 1 "register_operand" "v") + (match_operand:VI 2 "register_operand" "v"))) (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))] "<VI_unit>" "vaddu<VI_char>s %0,%1,%2" @@ -527,9 +522,8 @@ (define_insn "altivec_vadds<VI_char>s" [(set (match_operand:VI 0 "register_operand" "=v") - (unspec:VI [(match_operand:VI 1 "register_operand" "v") - (match_operand:VI 2 "register_operand" "v")] - UNSPEC_VADDS)) + (ss_plus:VI (match_operand:VI 1 "register_operand" "v") + (match_operand:VI 2 "register_operand" "v"))) (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))] "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)" "vadds<VI_char>s %0,%1,%2" @@ -563,9 +557,8 @@ (define_insn "altivec_vsubu<VI_char>s" [(set (match_operand:VI 0 "register_operand" "=v") - (unspec:VI [(match_operand:VI 1 "register_operand" "v") - (match_operand:VI 2 "register_operand" "v")] - UNSPEC_VSUBU)) + (us_minus:VI (match_operand:VI 1 "register_operand" "v") + (match_operand:VI 2 "register_operand" "v"))) (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))] "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)" "vsubu<VI_char>s %0,%1,%2" @@ -573,9 +566,8 @@ (define_insn "altivec_vsubs<VI_char>s" [(set (match_operand:VI 0 "register_operand" "=v") - (unspec:VI [(match_operand:VI 1 "register_operand" "v") - (match_operand:VI 2 "register_operand" "v")] - UNSPEC_VSUBS)) + (ss_minus:VI (match_operand:VI 1 
"register_operand" "v") + (match_operand:VI 2 "register_operand" "v"))) (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))] "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)" "vsubs<VI_char>s %0,%1,%2" @@ -3480,9 +3472,8 @@ (define_expand "altivec_abss_<mode>" [(set (match_dup 2) (vec_duplicate:VI (const_int 0))) (parallel [(set (match_dup 3) - (unspec:VI [(match_dup 2) - (match_operand:VI 1 "register_operand" "v")] - UNSPEC_VSUBS)) + (ss_minus:VI (match_dup 2) + (match_operand:VI 1 "register_operand" "v"))) (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]) (set (match_operand:VI 0 "register_operand" "=v") diff --git a/gcc/cp/ChangeLog b/gcc/cp/ChangeLog index bf5c30c..7d5f20f 100644 --- a/gcc/cp/ChangeLog +++ b/gcc/cp/ChangeLog @@ -1,3 +1,22 @@ +2021-12-20 Patrick Palka <ppalka@redhat.com> + + * call.c (build_new_method_call): Set + BASELINK_FUNCTIONS_MAYBE_INCOMPLETE_P on the pruned baselink. + * cp-tree.h (BASELINK_FUNCTIONS_MAYBE_INCOMPLETE_P): Define. + * pt.c (filter_memfn_lookup): New subroutine of tsubst_baselink. + (tsubst_baselink): Use filter_memfn_lookup on the new lookup + result when BASELINK_FUNCTIONS_MAYBE_INCOMPLETE_P is set on the + old baselink. Remove redundant BASELINK_P check. + * search.c (build_baselink): Set + BASELINK_FUNCTIONS_MAYBE_INCOMPLETE_P appropriately. + +2021-12-20 Patrick Palka <ppalka@redhat.com> + + * call.c (build_new_method_call): For a non-dependent call + expression inside a template, returning a templated tree + whose overload set contains just the selected function. + * semantics.c (finish_call_expr): Likewise. + 2021-12-19 Matthias Kretz <m.kretz@gsi.de> * module.cc (trees_out::get_merge_kind): NAMESPACE_DECLs also diff --git a/gcc/cp/call.c b/gcc/cp/call.c index 495dcdd..bee367f 100644 --- a/gcc/cp/call.c +++ b/gcc/cp/call.c @@ -11163,6 +11163,34 @@ build_new_method_call (tree instance, tree fns, vec<tree, va_gc> **args, } if (INDIRECT_REF_P (call)) call = TREE_OPERAND (call, 0); + + /* Prune all but the selected function from the original overload + set so that we can avoid some duplicate work at instantiation time. */ + if (really_overloaded_fn (fns)) + { + if (DECL_TEMPLATE_INFO (fn) + && DECL_MEMBER_TEMPLATE_P (DECL_TI_TEMPLATE (fn)) + && dependent_type_p (DECL_CONTEXT (fn))) + { + /* FIXME: We're not prepared to fully instantiate "inside-out" + partial instantiations such as A<T>::f<int>(). So instead + use the selected template, not the specialization. */ + + if (OVL_SINGLE_P (fns)) + /* If the original overload set consists of a single function + template, this isn't beneficial. 
*/ + goto skip_prune; + + fn = ovl_make (DECL_TI_TEMPLATE (fn)); + if (template_only) + fn = lookup_template_function (fn, explicit_targs); + } + orig_fns = copy_node (orig_fns); + BASELINK_FUNCTIONS (orig_fns) = fn; + BASELINK_FUNCTIONS_MAYBE_INCOMPLETE_P (orig_fns) = true; + } + +skip_prune: call = (build_min_non_dep_call_vec (call, build_min (COMPONENT_REF, TREE_TYPE (CALL_EXPR_FN (call)), diff --git a/gcc/cp/cp-tree.h b/gcc/cp/cp-tree.h index 8b5cfa2..5fc9e5e 100644 --- a/gcc/cp/cp-tree.h +++ b/gcc/cp/cp-tree.h @@ -464,6 +464,7 @@ extern GTY(()) tree cp_global_trees[CPTI_MAX]; PACK_EXPANSION_SIZEOF_P (in *_PACK_EXPANSION) OVL_USING_P (in OVERLOAD) IMPLICIT_CONV_EXPR_NONTYPE_ARG (in IMPLICIT_CONV_EXPR) + BASELINK_FUNCTIONS_MAYBE_INCOMPLETE_P (in BASELINK) 2: IDENTIFIER_KIND_BIT_2 (in IDENTIFIER_NODE) ICS_THIS_FLAG (in _CONV) DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (in VAR_DECL) @@ -1060,6 +1061,10 @@ struct GTY(()) tree_template_decl { /* Nonzero if this baselink was from a qualified lookup. */ #define BASELINK_QUALIFIED_P(NODE) \ TREE_LANG_FLAG_0 (BASELINK_CHECK (NODE)) +/* Nonzero if the overload set for this baselink might be incomplete due + to the lookup being performed from an incomplete-class context. */ +#define BASELINK_FUNCTIONS_MAYBE_INCOMPLETE_P(NODE) \ + TREE_LANG_FLAG_1 (BASELINK_CHECK (NODE)) struct GTY(()) tree_baselink { struct tree_common common; diff --git a/gcc/cp/pt.c b/gcc/cp/pt.c index 4f0ae6d..a115e1d 100644 --- a/gcc/cp/pt.c +++ b/gcc/cp/pt.c @@ -16221,6 +16221,81 @@ tsubst (tree t, tree args, tsubst_flags_t complain, tree in_decl) } } +/* OLDFNS is a lookup set of member functions from some class template, and + NEWFNS is a lookup set of member functions from a specialization of that + class template. Return the subset of NEWFNS which are specializations of + a function from OLDFNS. */ + +static tree +filter_memfn_lookup (tree oldfns, tree newfns) +{ + /* Record all member functions from the old lookup set OLDFNS into + VISIBLE_SET. */ + hash_set<tree> visible_set; + for (tree fn : lkp_range (oldfns)) + { + if (TREE_CODE (fn) == USING_DECL) + { + /* FIXME: Punt on (dependent) USING_DECL for now; mapping + a dependent USING_DECL to its instantiation seems + tricky. */ + gcc_checking_assert (DECL_DEPENDENT_P (fn)); + return newfns; + } + else if (TREE_CODE (fn) == TEMPLATE_DECL) + /* A member function template. */ + visible_set.add (fn); + else if (TREE_CODE (fn) == FUNCTION_DECL) + { + if (DECL_TEMPLATE_INFO (fn)) + /* A non-template member function. */ + visible_set.add (DECL_TI_TEMPLATE (fn)); + else + /* A non-template member function from a non-template base, + injected via a using-decl. */ + visible_set.add (fn); + } + else + gcc_unreachable (); + } + + /* Returns true iff (a less specialized version of) FN appeared in + the old lookup set OLDFNS. */ + auto visible_p = [&visible_set] (tree fn) { + if (TREE_CODE (fn) == FUNCTION_DECL + && !DECL_TEMPLATE_INFO (fn)) + return visible_set.contains (fn); + else if (DECL_TEMPLATE_INFO (fn)) + return visible_set.contains (DECL_TI_TEMPLATE (fn)); + else + gcc_unreachable (); + }; + + bool lookup_changed_p = false; + for (tree fn : lkp_range (newfns)) + if (!visible_p (fn)) + { + lookup_changed_p = true; + break; + } + if (!lookup_changed_p) + return newfns; + + /* Filter out from NEWFNS the member functions that weren't + previously visible according to OLDFNS. 
*/ + tree filtered_fns = NULL_TREE; + unsigned filtered_size = 0; + for (tree fn : lkp_range (newfns)) + if (visible_p (fn)) + { + filtered_fns = lookup_add (fn, filtered_fns); + filtered_size++; + } + gcc_checking_assert (filtered_size == visible_set.elements ()); + + return filtered_fns; +} + /* tsubst a BASELINK. OBJECT_TYPE, if non-NULL, is the type of the expression on the left-hand side of the "." or "->" operator. We only do the lookup if we had a dependent BASELINK. Otherwise we @@ -16274,8 +16349,21 @@ tsubst_baselink (tree baselink, tree object_type, /* Treat as-if non-dependent below. */ dependent_p = false; + bool maybe_incomplete = BASELINK_FUNCTIONS_MAYBE_INCOMPLETE_P (baselink); baselink = lookup_fnfields (qualifying_scope, name, /*protect=*/1, complain); + if (maybe_incomplete) + { + /* Filter out from the new lookup set those functions which didn't + appear in the original lookup set (in a less specialized form). + This is needed to preserve the consistency of member lookup + performed in an incomplete-class context, within which + later-declared members ought to remain invisible. */ + BASELINK_FUNCTIONS (baselink) + = filter_memfn_lookup (fns, BASELINK_FUNCTIONS (baselink)); + BASELINK_FUNCTIONS_MAYBE_INCOMPLETE_P (baselink) = true; + } + if (!baselink) { if ((complain & tf_error) @@ -16285,8 +16373,7 @@ tsubst_baselink (tree baselink, tree object_type, return error_mark_node; } - if (BASELINK_P (baselink)) - fns = BASELINK_FUNCTIONS (baselink); + fns = BASELINK_FUNCTIONS (baselink); } else { diff --git a/gcc/cp/search.c b/gcc/cp/search.c index 943671a..b673db9 100644 --- a/gcc/cp/search.c +++ b/gcc/cp/search.c @@ -1091,6 +1091,10 @@ build_baselink (tree binfo, tree access_binfo, tree functions, tree optype) BASELINK_FUNCTIONS (baselink) = functions; BASELINK_OPTYPE (baselink) = optype; + if (binfo == access_binfo + && TYPE_BEING_DEFINED (BINFO_TYPE (access_binfo))) + BASELINK_FUNCTIONS_MAYBE_INCOMPLETE_P (baselink) = true; + return baselink; } diff --git a/gcc/cp/semantics.c b/gcc/cp/semantics.c index 6603066..6ffd82c 100644 --- a/gcc/cp/semantics.c +++ b/gcc/cp/semantics.c @@ -2893,6 +2893,21 @@ finish_call_expr (tree fn, vec<tree, va_gc> **args, bool disallow_virtual, { if (INDIRECT_REF_P (result)) result = TREE_OPERAND (result, 0); + + /* Prune all but the selected function from the original overload + set so that we can avoid some duplicate work at instantiation time. */ + if (TREE_CODE (result) == CALL_EXPR + && really_overloaded_fn (orig_fn)) + { + orig_fn = CALL_EXPR_FN (result); + if (TREE_CODE (orig_fn) == COMPONENT_REF) + { + /* The non-dependent result of build_new_method_call. */ + orig_fn = TREE_OPERAND (orig_fn, 1); + gcc_assert (BASELINK_P (orig_fn)); + } + } + result = build_call_vec (TREE_TYPE (result), orig_fn, orig_args); SET_EXPR_LOCATION (result, input_location); KOENIG_LOOKUP_P (result) = koenig_p; diff --git a/gcc/d/ChangeLog b/gcc/d/ChangeLog index 8bc711f..f878c33 100644 --- a/gcc/d/ChangeLog +++ b/gcc/d/ChangeLog @@ -1,3 +1,10 @@ +2021-12-20 Iain Buclaw <ibuclaw@gdcproject.org> + + * dmd/MERGE: Merge upstream dmd ad8412530. + * expr.cc (ExprVisitor::visit (DeleteExp *)): Remove code generation + of _d_delstruct. + * runtime.def (DELSTRUCT): Remove. + 2021-12-15 Iain Buclaw <ibuclaw@gdcproject.org> * d-lang.cc (d_init_options): Set default -fextern-std= to C++17. 
diff --git a/gcc/d/dmd/MERGE b/gcc/d/dmd/MERGE index d7eff4f..b42576c 100644 --- a/gcc/d/dmd/MERGE +++ b/gcc/d/dmd/MERGE @@ -1,4 +1,4 @@ -93108bb9ea6216d67fa97bb4842fb59f26f6bfc7 +ad8412530e607ffebec36f2dbdff1a6f2798faf7 The first line of this file holds the git revision number of the last merge done from the dlang/dmd repository. diff --git a/gcc/d/dmd/canthrow.d b/gcc/d/dmd/canthrow.d index b67a9d1..b187715 100644 --- a/gcc/d/dmd/canthrow.d +++ b/gcc/d/dmd/canthrow.d @@ -82,6 +82,22 @@ extern (C++) bool canThrow(Expression e, FuncDeclaration func, bool mustNotThrow if (global.errors && !ce.e1.type) return; // error recovery + + import dmd.id : Id; + + if (ce.f && ce.f.ident == Id._d_delstruct) + { + // Only check if the dtor throws. + Type tb = (*ce.arguments)[0].type.toBasetype(); + auto ts = tb.nextOf().baseElemOf().isTypeStruct(); + if (ts) + { + auto sd = ts.sym; + if (sd.dtor) + checkFuncThrows(ce, sd.dtor); + } + } + /* If calling a function or delegate that is typed as nothrow, * then this expression cannot throw. * Note that pure functions can throw. diff --git a/gcc/d/dmd/dcast.d b/gcc/d/dmd/dcast.d index 2e5a79d..a572a1f 100644 --- a/gcc/d/dmd/dcast.d +++ b/gcc/d/dmd/dcast.d @@ -1565,9 +1565,9 @@ Expression castTo(Expression e, Scope* sc, Type t, Type att = null) result = e; return; } - if (e.op == EXP.variable) + if (auto ve = e.isVarExp()) { - VarDeclaration v = (cast(VarExp)e).var.isVarDeclaration(); + VarDeclaration v = ve.var.isVarDeclaration(); if (v && v.storage_class & STC.manifest) { result = e.ctfeInterpret(); @@ -1852,8 +1852,8 @@ Expression castTo(Expression e, Scope* sc, Type t, Type att = null) override void visit(StructLiteralExp e) { visit(cast(Expression)e); - if (result.op == EXP.structLiteral) - (cast(StructLiteralExp)result).stype = t; // commit type + if (auto sle = result.isStructLiteralExp()) + sle.stype = t; // commit type } override void visit(StringExp e) @@ -1866,7 +1866,8 @@ Expression castTo(Expression e, Scope* sc, Type t, Type att = null) //printf("StringExp::castTo(t = %s), '%s' committed = %d\n", t.toChars(), e.toChars(), e.committed); - if (!e.committed && t.ty == Tpointer && t.nextOf().ty == Tvoid) + if (!e.committed && t.ty == Tpointer && t.nextOf().ty == Tvoid && + (!sc || !(sc.flags & SCOPE.Cfile))) { e.error("cannot convert string literal to `void*`"); result = ErrorExp.get(); @@ -1883,7 +1884,7 @@ Expression castTo(Expression e, Scope* sc, Type t, Type att = null) if (!e.committed) { - se = cast(StringExp)e.copy(); + se = e.copy().isStringExp(); se.committed = 1; copied = 1; } @@ -1908,7 +1909,7 @@ Expression castTo(Expression e, Scope* sc, Type t, Type att = null) { if (!copied) { - se = cast(StringExp)e.copy(); + se = e.copy().isStringExp(); copied = 1; } se.type = t; @@ -1924,7 +1925,7 @@ Expression castTo(Expression e, Scope* sc, Type t, Type att = null) */ if (e.committed && tb.ty == Tsarray && typeb.ty == Tarray) { - se = cast(StringExp)e.copy(); + se = e.copy().isStringExp(); d_uns64 szx = tb.nextOf().size(); assert(szx <= 255); se.sz = cast(ubyte)szx; @@ -1952,7 +1953,7 @@ Expression castTo(Expression e, Scope* sc, Type t, Type att = null) { if (!copied) { - se = cast(StringExp)e.copy(); + se = e.copy().isStringExp(); copied = 1; } return lcast(); @@ -1961,7 +1962,7 @@ Expression castTo(Expression e, Scope* sc, Type t, Type att = null) { if (!copied) { - se = cast(StringExp)e.copy(); + se = e.copy().isStringExp(); copied = 1; } return lcast(); @@ -1977,7 +1978,7 @@ Expression castTo(Expression e, Scope* sc, Type t, Type att = 
null) { if (!copied) { - se = cast(StringExp)e.copy(); + se = e.copy().isStringExp(); copied = 1; } if (tb.ty == Tsarray) @@ -2088,7 +2089,7 @@ Expression castTo(Expression e, Scope* sc, Type t, Type att = null) L1: if (!copied) { - se = cast(StringExp)e.copy(); + se = e.copy().isStringExp(); copied = 1; } @@ -2154,10 +2155,10 @@ Expression castTo(Expression e, Scope* sc, Type t, Type att = null) } // Look for pointers to functions where the functions are overloaded. - if (e.e1.op == EXP.overloadSet && + if (e.e1.isOverExp() && (tb.ty == Tpointer || tb.ty == Tdelegate) && tb.nextOf().ty == Tfunction) { - OverExp eo = cast(OverExp)e.e1; + OverExp eo = e.e1.isOverExp(); FuncDeclaration f = null; for (size_t i = 0; i < eo.vars.a.dim; i++) { @@ -2188,11 +2189,11 @@ Expression castTo(Expression e, Scope* sc, Type t, Type att = null) } } - if (e.e1.op == EXP.variable && + if (e.e1.isVarExp() && typeb.ty == Tpointer && typeb.nextOf().ty == Tfunction && tb.ty == Tpointer && tb.nextOf().ty == Tfunction) { - auto ve = cast(VarExp)e.e1; + auto ve = e.e1.isVarExp(); auto f = ve.var.isFuncDeclaration(); if (f) { @@ -2303,7 +2304,7 @@ Expression castTo(Expression e, Scope* sc, Type t, Type att = null) goto L1; } - ae = cast(ArrayLiteralExp)e.copy(); + ae = e.copy().isArrayLiteralExp(); if (e.basis) ae.basis = e.basis.castTo(sc, tb.nextOf()); ae.elements = e.elements.copy(); @@ -2325,7 +2326,7 @@ Expression castTo(Expression e, Scope* sc, Type t, Type att = null) Type tp = typeb.nextOf().pointerTo(); if (!tp.equals(ae.type)) { - ae = cast(ArrayLiteralExp)e.copy(); + ae = e.copy().isArrayLiteralExp(); ae.type = tp; } } @@ -2382,7 +2383,7 @@ Expression castTo(Expression e, Scope* sc, Type t, Type att = null) if (tb.ty == Taarray && typeb.ty == Taarray && tb.nextOf().toBasetype().ty != Tvoid) { - AssocArrayLiteralExp ae = cast(AssocArrayLiteralExp)e.copy(); + AssocArrayLiteralExp ae = e.copy().isAssocArrayLiteralExp(); ae.keys = e.keys.copy(); ae.values = e.values.copy(); assert(e.keys.dim == e.values.dim); @@ -2422,7 +2423,7 @@ Expression castTo(Expression e, Scope* sc, Type t, Type att = null) { result = e.copy(); result.type = t; - (cast(SymOffExp)result).hasOverloads = false; + result.isSymOffExp().hasOverloads = false; return; } @@ -2641,7 +2642,7 @@ Expression castTo(Expression e, Scope* sc, Type t, Type att = null) { Expression e1x = e.e1.implicitCastTo(sc, t1b); assert(e1x.op != EXP.error); - e = cast(SliceExp)e.copy(); + e = e.copy().isSliceExp(); e.e1 = e1x; e.type = t; result = e; @@ -2751,10 +2752,10 @@ Expression inferType(Expression e, Type t, int flag = 0) if (t) switch (e.op) { - case EXP.arrayLiteral: return visitAle(cast(ArrayLiteralExp) e); - case EXP.assocArrayLiteral: return visitAar(cast(AssocArrayLiteralExp) e); - case EXP.function_: return visitFun(cast(FuncExp) e); - case EXP.question: return visitTer(cast(CondExp) e); + case EXP.arrayLiteral: return visitAle(e.isArrayLiteralExp()); + case EXP.assocArrayLiteral: return visitAar(e.isAssocArrayLiteralExp()); + case EXP.function_: return visitFun(e.isFuncExp()); + case EXP.question: return visitTer(e.isCondExp()); default: } return e; @@ -2830,9 +2831,9 @@ Expression scaleFactor(BinExp be, Scope* sc) */ private bool isVoidArrayLiteral(Expression e, Type other) { - while (e.op == EXP.arrayLiteral && e.type.ty == Tarray && ((cast(ArrayLiteralExp)e).elements.dim == 1)) + while (e.op == EXP.arrayLiteral && e.type.ty == Tarray && (e.isArrayLiteralExp().elements.dim == 1)) { - auto ale = cast(ArrayLiteralExp)e; + auto ale = 
e.isArrayLiteralExp(); e = ale[0]; if (other.ty == Tsarray || other.ty == Tarray) other = other.nextOf(); @@ -2842,7 +2843,7 @@ private bool isVoidArrayLiteral(Expression e, Type other) if (other.ty != Tsarray && other.ty != Tarray) return false; Type t = e.type; - return (e.op == EXP.arrayLiteral && t.ty == Tarray && t.nextOf().ty == Tvoid && (cast(ArrayLiteralExp)e).elements.dim == 0); + return (e.op == EXP.arrayLiteral && t.ty == Tarray && t.nextOf().ty == Tvoid && e.isArrayLiteralExp().elements.dim == 0); } /** @@ -3463,20 +3464,20 @@ LmodCompare: Expression rhs = e2; // T[x .. y] op ? - if (e1.isSliceExp()) - lhs = new IndexExp(Loc.initial, (cast(UnaExp)e1).e1, IntegerExp.literal!0); + if (auto se1 = e1.isSliceExp()) + lhs = new IndexExp(Loc.initial, se1.e1, IntegerExp.literal!0); // [t1, t2, .. t3] op ? - if (e1.isArrayLiteralExp()) - lhs = (cast(ArrayLiteralExp)e1).opIndex(0); + if (auto ale1 = e1.isArrayLiteralExp()) + lhs = ale1.opIndex(0); // ? op U[z .. t] - if (e2.isSliceExp()) - rhs = new IndexExp(Loc.initial, (cast(UnaExp)e2).e1, IntegerExp.literal!0); + if (auto se2 = e2.isSliceExp()) + rhs = new IndexExp(Loc.initial, se2.e1, IntegerExp.literal!0); // ? op [u1, u2, .. u3] - if (e2.isArrayLiteralExp()) - rhs = (cast(ArrayLiteralExp)e2).opIndex(0); + if (auto ale2 = e2.isArrayLiteralExp()) + rhs = ale2.opIndex(0); // create a new binary expression with the new lhs and rhs (at this stage, at least // one of lhs/rhs has been replaced with the 0'th element of the array it was before) diff --git a/gcc/d/dmd/dinterpret.d b/gcc/d/dmd/dinterpret.d index 8f20c38..fc5b9a8 100644 --- a/gcc/d/dmd/dinterpret.d +++ b/gcc/d/dmd/dinterpret.d @@ -4837,6 +4837,47 @@ public: result = interpret(ce, istate); return; } + else if (fd.ident == Id._d_delstruct) + { + // Only interpret the dtor and the argument. 
+ assert(e.arguments.dim == 1); + + Type tb = (*e.arguments)[0].type.toBasetype(); + auto ts = tb.nextOf().baseElemOf().isTypeStruct(); + if (ts) + { + result = interpretRegion((*e.arguments)[0], istate); + if (exceptionOrCant(result)) + return; + + if (result.op == EXP.null_) + { + result = CTFEExp.voidexp; + return; + } + + if (result.op != EXP.address || + (cast(AddrExp)result).e1.op != EXP.structLiteral) + { + e.error("`delete` on invalid struct pointer `%s`", result.toChars()); + result = CTFEExp.cantexp; + return; + } + + auto sd = ts.sym; + if (sd.dtor) + { + auto sle = cast(StructLiteralExp)(cast(AddrExp)result).e1; + result = interpretFunction(pue, sd.dtor, istate, null, sle); + if (exceptionOrCant(result)) + return; + + result = CTFEExp.voidexp; + } + } + + return; + } } else if (auto soe = ecall.isSymOffExp()) { diff --git a/gcc/d/dmd/dsymbol.d b/gcc/d/dmd/dsymbol.d index 0f75157..e34a94a 100644 --- a/gcc/d/dmd/dsymbol.d +++ b/gcc/d/dmd/dsymbol.d @@ -2453,10 +2453,10 @@ Dsymbol handleSymbolRedeclarations(ref Scope sc, Dsymbol s, Dsymbol s2, ScopeDsy if (i1 && i2) return collision(); // can't both have initializers - if (i1) + if (i1) // vd is the definition { - vd2._init = vd._init; - vd._init = null; + sds.symtab.update(vd); // replace vd2 with the definition + return vd; } /* BUG: the types should match, which needs semantic() to be run on it @@ -2497,14 +2497,10 @@ Dsymbol handleSymbolRedeclarations(ref Scope sc, Dsymbol s, Dsymbol s2, ScopeDsy if (fd.fbody && fd2.fbody) return collision(); // can't both have bodies - if (fd.fbody) + if (fd.fbody) // fd is the definition { - fd2.fbody = fd.fbody; // transfer body to existing declaration - fd.fbody = null; - - auto tf = fd.type.toTypeFunction(); - auto tf2 = fd2.type.toTypeFunction(); - tf2.parameterList = tf.parameterList; // transfer parameter list. + sds.symtab.update(fd); // replace fd2 in symbol table with fd + return fd; } /* BUG: just like with VarDeclaration, the types should match, which needs semantic() to be run on it. diff --git a/gcc/d/dmd/dsymbolsem.d b/gcc/d/dmd/dsymbolsem.d index 3a9abd2d..118b861 100644 --- a/gcc/d/dmd/dsymbolsem.d +++ b/gcc/d/dmd/dsymbolsem.d @@ -3981,15 +3981,15 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor return; TypeFunction tf = ctd.type.toTypeFunction(); + immutable dim = tf.parameterList.length; + auto sd = ad.isStructDeclaration(); /* See if it's the default constructor * But, template constructor should not become a default constructor. */ if (ad && (!ctd.parent.isTemplateInstance() || ctd.parent.isTemplateMixin())) { - immutable dim = tf.parameterList.length; - - if (auto sd = ad.isStructDeclaration()) + if (sd) { if (dim == 0 && tf.parameterList.varargs == VarArg.none) // empty default ctor w/o any varargs { @@ -4034,6 +4034,24 @@ private extern(C++) final class DsymbolSemanticVisitor : Visitor ad.defaultCtor = ctd; } } + // https://issues.dlang.org/show_bug.cgi?id=22593 + else if (auto ti = ctd.parent.isTemplateInstance()) + { + if (sd && sd.hasCopyCtor && (dim == 1 || (dim > 1 && tf.parameterList[1].defaultArg))) + { + auto param = tf.parameterList[0]; + + // if the template instance introduces an rvalue constructor + // between the members of a struct declaration, we should check if a + // copy constructor exists and issue an error in that case. 
+ if (!(param.storageClass & STC.ref_) && param.type.mutableOf().unSharedOf() == sd.type.mutableOf().unSharedOf()) + { + .error(ctd.loc, "Cannot define both an rvalue constructor and a copy constructor for `struct %s`", sd.toChars); + .errorSupplemental(ti.loc, "Template instance `%s` creates a rvalue constructor for `struct %s`", + ti.toChars(), sd.toChars()); + } + } + } } override void visit(PostBlitDeclaration pbd) diff --git a/gcc/d/dmd/expressionsem.d b/gcc/d/dmd/expressionsem.d index 48e47ce..2f1cd47 100644 --- a/gcc/d/dmd/expressionsem.d +++ b/gcc/d/dmd/expressionsem.d @@ -7316,6 +7316,8 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor deprecation(exp.loc, "The `delete` keyword has been deprecated. Use `object.destroy()` (and `core.memory.GC.free()` if applicable) instead."); } + Expression e = exp; + if (Expression ex = unaSemantic(exp, sc)) { result = ex; @@ -7352,7 +7354,27 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor if (tb.ty == Tstruct) { ad = (cast(TypeStruct)tb).sym; - semanticTypeInfo(sc, tb); + + Identifier hook = global.params.tracegc ? Id._d_delstructTrace : Id._d_delstruct; + if (!verifyHookExist(exp.loc, *sc, Id._d_delstructImpl, "deleting struct with dtor", Id.object)) + return setError(); + + // Lower to .object._d_delstruct{,Trace}(exp.e1) + Expression id = new IdentifierExp(exp.loc, Id.empty); + id = new DotIdExp(exp.loc, id, Id.object); + + auto tiargs = new Objects(); + tiargs.push(exp.e1.type); + id = new DotTemplateInstanceExp(exp.loc, id, Id._d_delstructImpl, tiargs); + id = new DotIdExp(exp.loc, id, hook); + + e = new CallExp(exp.loc, id, exp.e1); + /* Gag errors generated by calls to `_d_delstruct`, because they display + * internal compiler information, which is unnecessary to the user. 
+ */ + uint errors = global.startGagging(); + e = e.expressionSemantic(sc); + global.endGagging(errors); } break; @@ -7397,7 +7419,7 @@ private extern (C++) final class ExpressionSemanticVisitor : Visitor if (err) return setError(); - result = exp; + result = e; } override void visit(CastExp exp) diff --git a/gcc/d/dmd/id.d b/gcc/d/dmd/id.d index 83c89c0..d33b676 100644 --- a/gcc/d/dmd/id.d +++ b/gcc/d/dmd/id.d @@ -311,6 +311,9 @@ immutable Msgtable[] msgtable = { "__ArrayPostblit" }, { "__ArrayDtor" }, { "_d_delThrowable" }, + { "_d_delstructImpl" }, + { "_d_delstruct" }, + { "_d_delstructTrace" }, { "_d_assert_fail" }, { "dup" }, { "_aaApply" }, diff --git a/gcc/d/dmd/initsem.d b/gcc/d/dmd/initsem.d index 51ee27d..826b0ad 100644 --- a/gcc/d/dmd/initsem.d +++ b/gcc/d/dmd/initsem.d @@ -284,7 +284,7 @@ extern(C++) Initializer initializerSemantic(Initializer init, Scope* sc, ref Typ case Tarray: break; case Tvector: - t = (cast(TypeVector)t).basetype; + t = t.isTypeVector().basetype; break; case Taarray: case Tstruct: // consider implicit constructor call @@ -346,7 +346,7 @@ extern(C++) Initializer initializerSemantic(Initializer init, Scope* sc, ref Typ // found a tuple, expand it if (ei && ei.exp.op == EXP.tuple) { - TupleExp te = cast(TupleExp)ei.exp; + TupleExp te = ei.exp.isTupleExp(); i.index.remove(j); i.value.remove(j); for (size_t k = 0; k < te.exps.dim; ++k) @@ -462,7 +462,7 @@ extern(C++) Initializer initializerSemantic(Initializer init, Scope* sc, ref Typ { return i; // Failed, suppress duplicate error messages } - if (i.exp.type.ty == Ttuple && (cast(TypeTuple)i.exp.type).arguments.dim == 0) + if (i.exp.type.isTypeTuple() && i.exp.type.isTypeTuple().arguments.dim == 0) { Type et = i.exp.type; i.exp = new TupleExp(i.exp.loc, new Expressions()); @@ -492,12 +492,12 @@ extern(C++) Initializer initializerSemantic(Initializer init, Scope* sc, ref Typ */ if (i.exp.op == EXP.string_ && tb.ty == Tsarray) { - StringExp se = cast(StringExp)i.exp; + StringExp se = i.exp.isStringExp(); Type typeb = se.type.toBasetype(); TY tynto = tb.nextOf().ty; if (!se.committed && (typeb.ty == Tarray || typeb.ty == Tsarray) && tynto.isSomeChar && - se.numberOfCodeUnits(tynto) < (cast(TypeSArray)tb).dim.toInteger()) + se.numberOfCodeUnits(tynto) < tb.isTypeSArray().dim.toInteger()) { i.exp = se.castTo(sc, t); goto L1; @@ -520,7 +520,7 @@ extern(C++) Initializer initializerSemantic(Initializer init, Scope* sc, ref Typ // Look for implicit constructor call if (tb.ty == Tstruct && !(ti.ty == Tstruct && tb.toDsymbol(sc) == ti.toDsymbol(sc)) && !i.exp.implicitConvTo(t)) { - StructDeclaration sd = (cast(TypeStruct)tb).sym; + StructDeclaration sd = tb.isTypeStruct().sym; if (sd.ctor) { // Rewrite as S().ctor(exp) @@ -573,18 +573,16 @@ extern(C++) Initializer initializerSemantic(Initializer init, Scope* sc, ref Typ // better diagnostic message, as same as AssignExp::semantic. if (tb.ty == Tsarray && i.exp.implicitConvTo(tb.nextOf().arrayOf()) > MATCH.nomatch) { - uinteger_t dim1 = (cast(TypeSArray)tb).dim.toInteger(); + uinteger_t dim1 = tb.isTypeSArray().dim.toInteger(); uinteger_t dim2 = dim1; - if (i.exp.op == EXP.arrayLiteral) + if (auto ale = i.exp.isArrayLiteralExp()) { - ArrayLiteralExp ale = cast(ArrayLiteralExp)i.exp; dim2 = ale.elements ? 
ale.elements.dim : 0; } - else if (i.exp.op == EXP.slice) + else if (auto se = i.exp.isSliceExp()) { - Type tx = toStaticArrayType(cast(SliceExp)i.exp); - if (tx) - dim2 = (cast(TypeSArray)tx).dim.toInteger(); + if (Type tx = toStaticArrayType(se)) + dim2 = tx.isTypeSArray().dim.toInteger(); } if (dim1 != dim2) { @@ -746,10 +744,11 @@ extern(C++) Initializer initializerSemantic(Initializer init, Scope* sc, ref Typ * Params: * t = element type * dim = max number of elements + * simple = true if array of simple elements * Returns: * # of elements in array */ - size_t array(Type t, size_t dim) + size_t array(Type t, size_t dim, ref bool simple) { //printf(" type %s i %d dim %d dil.length = %d\n", t.toChars(), cast(int)i, cast(int)dim, cast(int)dil.length); auto tn = t.nextOf().toBasetype(); @@ -791,14 +790,30 @@ extern(C++) Initializer initializerSemantic(Initializer init, Scope* sc, ref Typ if (tnsa && di.initializer.isExpInitializer()) { // no braces enclosing array initializer, so recurse - array(tnsa, nelems); + array(tnsa, nelems, simple); } else if (auto tns = tn.isTypeStruct()) { - if (di.initializer.isExpInitializer()) + if (auto ei = di.initializer.isExpInitializer()) { // no braces enclosing struct initializer - dil[n].initializer = structs(tns); + + /* Disambiguate between an exp representing the entire + * struct, and an exp representing the first field of the struct + */ + if (needInterpret) + sc = sc.startCTFE(); + ei.exp = ei.exp.expressionSemantic(sc); + ei.exp = resolveProperties(sc, ei.exp); + if (needInterpret) + sc = sc.endCTFE(); + if (ei.exp.implicitConvTo(tn)) + di.initializer = elem(di.initializer); // the whole struct + else + { + simple = false; + dil[n].initializer = structs(tns); // the first field + } } else dil[n].initializer = elem(di.initializer); @@ -816,7 +831,8 @@ extern(C++) Initializer initializerSemantic(Initializer init, Scope* sc, ref Typ } size_t dim = tsa.isIncomplete() ? 
dil.length : cast(size_t)tsa.dim.toInteger(); - auto newdim = array(t, dim); + bool simple = true; + auto newdim = array(t, dim, simple); if (errors) return err(); @@ -849,7 +865,7 @@ extern(C++) Initializer initializerSemantic(Initializer init, Scope* sc, ref Typ /* If an array of simple elements, replace with an ArrayInitializer */ auto tnb = tn.toBasetype(); - if (!(tnb.isTypeSArray() || tnb.isTypeStruct())) + if (!tnb.isTypeSArray() && (!tnb.isTypeStruct() || simple)) { auto ai = new ArrayInitializer(ci.loc); ai.dim = cast(uint) dil.length; @@ -884,12 +900,12 @@ extern(C++) Initializer initializerSemantic(Initializer init, Scope* sc, ref Typ final switch (init.kind) { - case InitKind.void_: return visitVoid (cast( VoidInitializer)init); - case InitKind.error: return visitError (cast( ErrorInitializer)init); - case InitKind.struct_: return visitStruct(cast(StructInitializer)init); - case InitKind.array: return visitArray (cast( ArrayInitializer)init); - case InitKind.exp: return visitExp (cast( ExpInitializer)init); - case InitKind.C_: return visitC (cast( CInitializer)init); + case InitKind.void_: return visitVoid (init.isVoidInitializer()); + case InitKind.error: return visitError (init.isErrorInitializer()); + case InitKind.struct_: return visitStruct(init.isStructInitializer()); + case InitKind.array: return visitArray (init.isArrayInitializer()); + case InitKind.exp: return visitExp (init.isExpInitializer()); + case InitKind.C_: return visitC (init.isCInitializer()); } } @@ -943,8 +959,7 @@ Initializer inferType(Initializer init, Scope* sc) { return iz; } - assert(iz.isExpInitializer()); - (*values)[i] = (cast(ExpInitializer)iz).exp; + (*values)[i] = iz.isExpInitializer().exp; assert(!(*values)[i].isErrorExp()); } Expression e = new AssocArrayLiteralExp(init.loc, keys, values); @@ -966,8 +981,7 @@ Initializer inferType(Initializer init, Scope* sc) { return iz; } - assert(iz.isExpInitializer()); - (*elements)[i] = (cast(ExpInitializer)iz).exp; + (*elements)[i] = iz.isExpInitializer().exp; assert(!(*elements)[i].isErrorExp()); } Expression e = new ArrayLiteralExp(init.loc, null, elements); @@ -996,9 +1010,8 @@ Initializer inferType(Initializer init, Scope* sc) init.exp = resolveAliasThis(sc, init.exp); init.exp = resolveProperties(sc, init.exp); - if (init.exp.op == EXP.scope_) + if (auto se = init.exp.isScopeExp()) { - ScopeExp se = cast(ScopeExp)init.exp; TemplateInstance ti = se.sds.isTemplateInstance(); if (ti && ti.semanticRun == PASS.semantic && !ti.aliasdecl) se.error("cannot infer type from %s `%s`, possible circular dependency", se.sds.kind(), se.toChars()); @@ -1021,16 +1034,15 @@ Initializer inferType(Initializer init, Scope* sc) return new ErrorInitializer(); } } - if (init.exp.op == EXP.address) + if (auto ae = init.exp.isAddrExp()) { - AddrExp ae = cast(AddrExp)init.exp; if (ae.e1.op == EXP.overloadSet) { init.exp.error("cannot infer type from overloaded function symbol `%s`", init.exp.toChars()); return new ErrorInitializer(); } } - if (init.exp.op == EXP.error) + if (init.exp.isErrorExp()) { return new ErrorInitializer(); } @@ -1050,12 +1062,12 @@ Initializer inferType(Initializer init, Scope* sc) final switch (init.kind) { - case InitKind.void_: return visitVoid (cast( VoidInitializer)init); - case InitKind.error: return visitError (cast( ErrorInitializer)init); - case InitKind.struct_: return visitStruct(cast(StructInitializer)init); - case InitKind.array: return visitArray (cast( ArrayInitializer)init); - case InitKind.exp: return visitExp (cast( 
ExpInitializer)init); - case InitKind.C_: return visitC (cast( CInitializer)init); + case InitKind.void_: return visitVoid (init.isVoidInitializer()); + case InitKind.error: return visitError (init.isErrorInitializer()); + case InitKind.struct_: return visitStruct(init.isStructInitializer()); + case InitKind.array: return visitArray (init.isArrayInitializer()); + case InitKind.exp: return visitExp (init.isExpInitializer()); + case InitKind.C_: return visitC (init.isCInitializer()); } } @@ -1260,12 +1272,12 @@ extern (C++) Expression initializerToExpression(Initializer init, Type itype = n final switch (init.kind) { - case InitKind.void_: return visitVoid (cast( VoidInitializer)init); - case InitKind.error: return visitError (cast( ErrorInitializer)init); - case InitKind.struct_: return visitStruct(cast(StructInitializer)init); - case InitKind.array: return visitArray (cast( ArrayInitializer)init); - case InitKind.exp: return visitExp (cast( ExpInitializer)init); - case InitKind.C_: return visitC (cast( CInitializer)init); + case InitKind.void_: return visitVoid (init.isVoidInitializer()); + case InitKind.error: return visitError (init.isErrorInitializer()); + case InitKind.struct_: return visitStruct(init.isStructInitializer()); + case InitKind.array: return visitArray (init.isArrayInitializer()); + case InitKind.exp: return visitExp (init.isExpInitializer()); + case InitKind.C_: return visitC (init.isCInitializer()); } } @@ -1308,7 +1320,7 @@ private bool hasNonConstPointers(Expression e) { if (ae.type.nextOf().hasPointers() && checkArray(ae.values)) return true; - if ((cast(TypeAArray)ae.type).index.hasPointers()) + if (ae.type.isTypeAArray().index.hasPointers()) return checkArray(ae.keys); return false; } diff --git a/gcc/d/dmd/nogc.d b/gcc/d/dmd/nogc.d index 31a25a7..8bdddb8 100644 --- a/gcc/d/dmd/nogc.d +++ b/gcc/d/dmd/nogc.d @@ -83,6 +83,20 @@ public: } f.printGCUsage(e.loc, "setting `length` may cause a GC allocation"); } + else if (fd.ident == Id._d_delstruct) + { + // In expressionsem.d, `delete s` was lowererd to `_d_delstruct(s)`. + // The following code handles the call like the original expression, + // so the error is menaningful to the user. 
+ if (f.setGC()) + { + e.error("cannot use `delete` in `@nogc` %s `%s`", f.kind(), + f.toPrettyChars()); + err = true; + return; + } + f.printGCUsage(e.loc, "`delete` requires the GC"); + } } override void visit(ArrayLiteralExp e) diff --git a/gcc/d/dmd/semantic3.d b/gcc/d/dmd/semantic3.d index da328fd..49b87d6 100644 --- a/gcc/d/dmd/semantic3.d +++ b/gcc/d/dmd/semantic3.d @@ -419,8 +419,7 @@ private extern(C++) final class Semantic3Visitor : Visitor sc2.insert(_arguments); _arguments.parent = funcdecl; } - if ((f.linkage == LINK.d || f.parameterList.length) && - !(sc.flags & SCOPE.Cfile)) // don't want to require importing stdarg for C files + if (f.linkage == LINK.d || f.parameterList.length) { // Declare _argptr Type t = target.va_listType(funcdecl.loc, sc); diff --git a/gcc/d/dmd/tokens.d b/gcc/d/dmd/tokens.d index 2d98d5e..e5b05d3 100644 --- a/gcc/d/dmd/tokens.d +++ b/gcc/d/dmd/tokens.d @@ -23,7 +23,7 @@ import dmd.common.outbuffer; import dmd.root.rmem; import dmd.utf; -enum TOK : ushort +enum TOK : ubyte { reserved, @@ -84,10 +84,7 @@ enum TOK : ushort rightShiftAssign, unsignedRightShift, unsignedRightShiftAssign, - concatenate, concatenateAssign, // ~= - concatenateElemAssign, - concatenateDcharAssign, add, min, addAssign, @@ -109,15 +106,11 @@ enum TOK : ushort tilde, plusPlus, minusMinus, - construct, - blit, dot, comma, question, andAnd, orOr, - prePlusPlus, - preMinusMinus, // Numeric literals int32Literal, @@ -144,7 +137,6 @@ enum TOK : ushort hexadecimalString, this_, super_, - tuple, error, // Basic types @@ -244,7 +236,6 @@ enum TOK : ushort parameters, traits, - overloadSet, pure_, nothrow_, gshared, @@ -564,7 +555,6 @@ private immutable TOK[] keywords = TOK.gshared, TOK.traits, TOK.vector, - TOK.overloadSet, TOK.file, TOK.fileFullPath, TOK.line, @@ -769,7 +759,6 @@ extern (C++) struct Token TOK.gshared: "__gshared", TOK.traits: "__traits", TOK.vector: "__vector", - TOK.overloadSet: "__overloadset", TOK.file: "__FILE__", TOK.fileFullPath: "__FILE_FULL_PATH__", TOK.line: "__LINE__", @@ -793,8 +782,6 @@ extern (C++) struct Token TOK.xor: "^", TOK.xorAssign: "^=", TOK.assign: "=", - TOK.construct: "=", - TOK.blit: "=", TOK.lessThan: "<", TOK.greaterThan: ">", TOK.lessOrEqual: "<=", @@ -824,8 +811,6 @@ extern (C++) struct Token TOK.dollar: "$", TOK.plusPlus: "++", TOK.minusMinus: "--", - TOK.prePlusPlus: "++", - TOK.preMinusMinus: "--", TOK.type: "type", TOK.question: "?", TOK.negate: "-", @@ -842,9 +827,6 @@ extern (C++) struct Token TOK.andAssign: "&=", TOK.orAssign: "|=", TOK.concatenateAssign: "~=", - TOK.concatenateElemAssign: "~=", - TOK.concatenateDcharAssign: "~=", - TOK.concatenate: "~", TOK.call: "call", TOK.identity: "is", TOK.notIdentity: "!is", @@ -860,7 +842,6 @@ extern (C++) struct Token // For debugging TOK.error: "error", TOK.string_: "string", - TOK.tuple: "tuple", TOK.declaration: "declaration", TOK.onScopeExit: "scope(exit)", TOK.onScopeSuccess: "scope(success)", @@ -1113,11 +1094,6 @@ nothrow: return toString(value).ptr; } - static const(char)* toChars(ushort value) - { - return toString(cast(TOK)value).ptr; - } - extern (D) static string toString(TOK value) pure nothrow @nogc @safe { return tochars[value]; diff --git a/gcc/d/dmd/tokens.h b/gcc/d/dmd/tokens.h index 2e1d1f4..e095aa6 100644 --- a/gcc/d/dmd/tokens.h +++ b/gcc/d/dmd/tokens.h @@ -32,7 +32,7 @@ class Identifier; ? 
&& || */ -enum class TOK : unsigned short +enum class TOK : unsigned char { reserved, @@ -93,10 +93,7 @@ enum class TOK : unsigned short rightShiftAssign, unsignedRightShift, unsignedRightShiftAssign, - concatenate, concatenateAssign, // ~= - concatenateElemAssign, - concatenateDcharAssign, add, min, addAssign, @@ -118,15 +115,11 @@ enum class TOK : unsigned short tilde, plusPlus, minusMinus, - construct, - blit, dot, comma, question, andAnd, orOr, - prePlusPlus, - preMinusMinus, // Numeric literals int32Literal, // 104, @@ -153,7 +146,6 @@ enum class TOK : unsigned short hexadecimalString, this_, super_, - tuple, error, // Basic types @@ -253,7 +245,6 @@ enum class TOK : unsigned short parameters, // 210 traits, - overloadSet, pure_, nothrow_, gshared, diff --git a/gcc/d/dmd/typesem.d b/gcc/d/dmd/typesem.d index 2a86416..b8c4317 100644 --- a/gcc/d/dmd/typesem.d +++ b/gcc/d/dmd/typesem.d @@ -271,7 +271,7 @@ private void resolveHelper(TypeQualified mt, const ref Loc loc, Scope* sc, Dsymb sm = null; } // Same check as in Expression.semanticY(DotIdExp) - else if (sm.isPackage() && checkAccess(sc, cast(Package)sm)) + else if (sm.isPackage() && checkAccess(sc, sm.isPackage())) { // @@@DEPRECATED_2.096@@@ // Should be an error in 2.106. Just remove the deprecation call @@ -509,7 +509,7 @@ private Type stripDefaultArgs(Type t) Parameters* params = stripParams(tf.parameterList.parameters); if (tret == tf.next && params == tf.parameterList.parameters) return t; - TypeFunction tr = cast(TypeFunction)tf.copy(); + TypeFunction tr = tf.copy().isTypeFunction(); tr.parameterList.parameters = params; tr.next = tret; //printf("strip %s\n <- %s\n", tr.toChars(), t.toChars()); @@ -520,7 +520,7 @@ private Type stripDefaultArgs(Type t) Parameters* args = stripParams(tt.arguments); if (args == tt.arguments) return t; - TypeTuple tr = cast(TypeTuple)t.copy(); + TypeTuple tr = t.copy().isTypeTuple(); tr.arguments = args; return tr; } @@ -588,11 +588,11 @@ Expression typeToExpression(Type t) return null; switch (t.ty) { - case Tsarray: return visitSArray(cast(TypeSArray) t); - case Taarray: return visitAArray(cast(TypeAArray) t); - case Tident: return visitIdentifier(cast(TypeIdentifier) t); - case Tinstance: return visitInstance(cast(TypeInstance) t); - case Tmixin: return visitMixin(cast(TypeMixin) t); + case Tsarray: return visitSArray(t.isTypeSArray()); + case Taarray: return visitAArray(t.isTypeAArray()); + case Tident: return visitIdentifier(t.isTypeIdentifier()); + case Tinstance: return visitInstance(t.isTypeInstance()); + case Tmixin: return visitMixin(t.isTypeMixin()); default: return null; } } @@ -684,7 +684,7 @@ extern(C++) Type typeSemantic(Type type, const ref Loc loc, Scope* sc) .error(loc, "T in __vector(T) must be a static array, not `%s`", mtype.basetype.toChars()); return error(); } - TypeSArray t = cast(TypeSArray)mtype.basetype; + TypeSArray t = mtype.basetype.isTypeSArray(); const sz = cast(int)t.size(loc); final switch (target.isVectorTypeSupported(sz, t.nextOf())) { @@ -790,8 +790,8 @@ extern(C++) Type typeSemantic(Type type, const ref Loc loc, Scope* sc) return overflowError(); Type tbx = tbn.baseElemOf(); - if (tbx.ty == Tstruct && !(cast(TypeStruct)tbx).sym.members || - tbx.ty == Tenum && !(cast(TypeEnum)tbx).sym.members) + if (tbx.ty == Tstruct && !tbx.isTypeStruct().sym.members || + tbx.ty == Tenum && !tbx.isTypeEnum().sym.members) { /* To avoid meaningless error message, skip the total size limit check * when the bottom of element type is opaque. 
@@ -802,7 +802,7 @@ extern(C++) Type typeSemantic(Type type, const ref Loc loc, Scope* sc) tbn.ty == Tarray || tbn.ty == Tsarray || tbn.ty == Taarray || - (tbn.ty == Tstruct && ((cast(TypeStruct)tbn).sym.sizeok == Sizeok.done)) || + (tbn.ty == Tstruct && tbn.isTypeStruct().sym.sizeok == Sizeok.done) || tbn.ty == Tclass) { /* Only do this for types that don't need to have semantic() @@ -819,7 +819,7 @@ extern(C++) Type typeSemantic(Type type, const ref Loc loc, Scope* sc) { // Index the tuple to get the type assert(mtype.dim); - TypeTuple tt = cast(TypeTuple)tbn; + TypeTuple tt = tbn.isTypeTuple(); uinteger_t d = mtype.dim.toUInteger(); if (d >= tt.arguments.dim) { @@ -1026,9 +1026,9 @@ extern(C++) Type typeSemantic(Type type, const ref Loc loc, Scope* sc) */ } } - else if (tbase.ty == Tclass && !(cast(TypeClass)tbase).sym.isInterfaceDeclaration()) + else if (tbase.ty == Tclass && !tbase.isTypeClass().sym.isInterfaceDeclaration()) { - ClassDeclaration cd = (cast(TypeClass)tbase).sym; + ClassDeclaration cd = tbase.isTypeClass().sym; if (cd.semanticRun < PASS.semanticdone) cd.dsymbolSemantic(null); @@ -1275,7 +1275,7 @@ extern(C++) Type typeSemantic(Type type, const ref Loc loc, Scope* sc) } if (e.op == EXP.function_) // https://issues.dlang.org/show_bug.cgi?id=4820 { - FuncExp fe = cast(FuncExp)e; + FuncExp fe = e.isFuncExp(); // Replace function literal with a function symbol, // since default arg expression must be copied when used // and copying the literal itself is wrong. @@ -1402,8 +1402,8 @@ extern(C++) Type typeSemantic(Type type, const ref Loc loc, Scope* sc) (t.ty == Tstruct || t.ty == Tsarray || t.ty == Tenum)) { Type tb2 = t.baseElemOf(); - if (tb2.ty == Tstruct && !(cast(TypeStruct)tb2).sym.members || - tb2.ty == Tenum && !(cast(TypeEnum)tb2).sym.memtype) + if (tb2.ty == Tstruct && !tb2.isTypeStruct().sym.members || + tb2.ty == Tenum && !tb2.isTypeEnum().sym.memtype) { if (global.params.previewIn && (fparam.storageClass & STC.in_)) { @@ -1467,7 +1467,7 @@ extern(C++) Type typeSemantic(Type type, const ref Loc loc, Scope* sc) else { Type tv = t.baseElemOf(); - if (tv.ty == Tstruct && (cast(TypeStruct)tv).sym.noDefaultCtor) + if (tv.ty == Tstruct && tv.isTypeStruct().sym.noDefaultCtor) { .error(loc, "cannot have `out` parameter of type `%s` because the default construction is disabled", fparam.type.toChars()); errors = true; @@ -1824,26 +1824,26 @@ extern(C++) Type typeSemantic(Type type, const ref Loc loc, Scope* sc) switch (e.op) { case EXP.dotVariable: - mtype.sym = (cast(DotVarExp)e).var; + mtype.sym = e.isDotVarExp().var; break; case EXP.variable: - mtype.sym = (cast(VarExp)e).var; + mtype.sym = e.isVarExp().var; break; case EXP.function_: - auto fe = cast(FuncExp)e; + auto fe = e.isFuncExp(); mtype.sym = fe.td ? 
fe.td : fe.fd; break; case EXP.dotTemplateDeclaration: - mtype.sym = (cast(DotTemplateExp)e).td; + mtype.sym = e.isDotTemplateExp().td; break; case EXP.dSymbol: - mtype.sym = (cast(DsymbolExp)e).s; + mtype.sym = e.isDsymbolExp().s; break; case EXP.template_: - mtype.sym = (cast(TemplateExp)e).td; + mtype.sym = e.isTemplateExp().td; break; case EXP.scope_: - mtype.sym = (cast(ScopeExp)e).sds; + mtype.sym = e.isScopeExp().sds; break; case EXP.tuple: TupleExp te = e.toTupleExp(); @@ -1854,13 +1854,13 @@ extern(C++) Type typeSemantic(Type type, const ref Loc loc, Scope* sc) switch (src.op) { case EXP.type: - (*elems)[i] = (cast(TypeExp)src).type; + (*elems)[i] = src.isTypeExp().type; break; case EXP.dotType: - (*elems)[i] = (cast(DotTypeExp)src).sym.isType(); + (*elems)[i] = src.isDotTypeExp().sym.isType(); break; case EXP.overloadSet: - (*elems)[i] = (cast(OverExp)src).type; + (*elems)[i] = src.isOverExp().type; break; default: if (auto sym = isDsymbol(src)) @@ -1873,13 +1873,13 @@ extern(C++) Type typeSemantic(Type type, const ref Loc loc, Scope* sc) mtype.sym = td; break; case EXP.dotType: - result = (cast(DotTypeExp)e).sym.isType(); + result = e.isDotTypeExp().sym.isType(); break; case EXP.type: - result = (cast(TypeExp)e).type; + result = e.isTypeExp().type; break; case EXP.overloadSet: - result = (cast(OverExp)e).type; + result = e.isOverExp().type; break; default: break; @@ -2211,26 +2211,26 @@ extern(C++) Type typeSemantic(Type type, const ref Loc loc, Scope* sc) switch (type.ty) { default: return visitType(type); - case Tvector: return visitVector(cast(TypeVector)type); - case Tsarray: return visitSArray(cast(TypeSArray)type); - case Tarray: return visitDArray(cast(TypeDArray)type); - case Taarray: return visitAArray(cast(TypeAArray)type); - case Tpointer: return visitPointer(cast(TypePointer)type); - case Treference: return visitReference(cast(TypeReference)type); - case Tfunction: return visitFunction(cast(TypeFunction)type); - case Tdelegate: return visitDelegate(cast(TypeDelegate)type); - case Tident: return visitIdentifier(cast(TypeIdentifier)type); - case Tinstance: return visitInstance(cast(TypeInstance)type); - case Ttypeof: return visitTypeof(cast(TypeTypeof)type); - case Ttraits: return visitTraits(cast(TypeTraits)type); - case Treturn: return visitReturn(cast(TypeReturn)type); - case Tstruct: return visitStruct(cast(TypeStruct)type); - case Tenum: return visitEnum(cast(TypeEnum)type); - case Tclass: return visitClass(cast(TypeClass)type); - case Ttuple: return visitTuple (cast(TypeTuple)type); - case Tslice: return visitSlice(cast(TypeSlice)type); - case Tmixin: return visitMixin(cast(TypeMixin)type); - case Ttag: return visitTag(cast(TypeTag)type); + case Tvector: return visitVector(type.isTypeVector()); + case Tsarray: return visitSArray(type.isTypeSArray()); + case Tarray: return visitDArray(type.isTypeDArray()); + case Taarray: return visitAArray(type.isTypeAArray()); + case Tpointer: return visitPointer(type.isTypePointer()); + case Treference: return visitReference(type.isTypeReference()); + case Tfunction: return visitFunction(type.isTypeFunction()); + case Tdelegate: return visitDelegate(type.isTypeDelegate()); + case Tident: return visitIdentifier(type.isTypeIdentifier()); + case Tinstance: return visitInstance(type.isTypeInstance()); + case Ttypeof: return visitTypeof(type.isTypeTypeof()); + case Ttraits: return visitTraits(type.isTypeTraits()); + case Treturn: return visitReturn(type.isTypeReturn()); + case Tstruct: return visitStruct(type.isTypeStruct()); + case 
Tenum: return visitEnum(type.isTypeEnum()); + case Tclass: return visitClass(type.isTypeClass()); + case Ttuple: return visitTuple(type.isTypeTuple()); + case Tslice: return visitSlice(type.isTypeSlice()); + case Tmixin: return visitMixin(type.isTypeMixin()); + case Ttag: return visitTag(type.isTypeTag()); } } @@ -2300,7 +2300,7 @@ extern (C++) Type merge(Type type) case Tsarray: // prevents generating the mangle if the array dim is not yet known - if (!(cast(TypeSArray) type).dim.isIntegerExp()) + if (!type.isTypeSArray().dim.isIntegerExp()) return type; goto default; @@ -2308,7 +2308,7 @@ extern (C++) Type merge(Type type) break; case Taarray: - if (!(cast(TypeAArray)type).index.merge().deco) + if (!type.isTypeAArray().index.merge().deco) return type; goto default; @@ -2761,10 +2761,10 @@ Expression getProperty(Type t, Scope* scope_, const ref Loc loc, Identifier iden visitBasic(cast(TypeBasic)t) : visitType(t); - case Terror: return visitError (cast(TypeError)t); - case Tvector: return visitVector(cast(TypeVector)t); - case Tenum: return visitEnum (cast(TypeEnum)t); - case Ttuple: return visitTuple (cast(TypeTuple)t); + case Terror: return visitError (t.isTypeError()); + case Tvector: return visitVector(t.isTypeVector()); + case Tenum: return visitEnum (t.isTypeEnum()); + case Ttuple: return visitTuple (t.isTypeTuple()); } } @@ -2889,7 +2889,7 @@ void resolve(Type mt, const ref Loc loc, Scope* sc, out Expression pe, out Type { Expression e = cast(Expression)o; if (e.op == EXP.dSymbol) - return returnSymbol((cast(DsymbolExp)e).s); + return returnSymbol(e.isDsymbolExp().s); else return returnExp(e); } @@ -3154,8 +3154,8 @@ void resolve(Type mt, const ref Loc loc, Scope* sc, out Expression pe, out Type * template functions. */ } - if (auto f = mt.exp.op == EXP.variable ? (cast( VarExp)mt.exp).var.isFuncDeclaration() - : mt.exp.op == EXP.dotVariable ? (cast(DotVarExp)mt.exp).var.isFuncDeclaration() : null) + if (auto f = mt.exp.op == EXP.variable ? mt.exp.isVarExp().var.isFuncDeclaration() + : mt.exp.op == EXP.dotVariable ? mt.exp.isDotVarExp().var.isFuncDeclaration() : null) { // f might be a unittest declaration which is incomplete when compiled // without -unittest. 
That causes a segfault in checkForwardRef, see @@ -3350,17 +3350,17 @@ void resolve(Type mt, const ref Loc loc, Scope* sc, out Expression pe, out Type switch (mt.ty) { - default: visitType (mt); break; - case Tsarray: visitSArray (cast(TypeSArray)mt); break; - case Tarray: visitDArray (cast(TypeDArray)mt); break; - case Taarray: visitAArray (cast(TypeAArray)mt); break; - case Tident: visitIdentifier(cast(TypeIdentifier)mt); break; - case Tinstance: visitInstance (cast(TypeInstance)mt); break; - case Ttypeof: visitTypeof (cast(TypeTypeof)mt); break; - case Treturn: visitReturn (cast(TypeReturn)mt); break; - case Tslice: visitSlice (cast(TypeSlice)mt); break; - case Tmixin: visitMixin (cast(TypeMixin)mt); break; - case Ttraits: visitTraits (cast(TypeTraits)mt); break; + default: visitType (mt); break; + case Tsarray: visitSArray (mt.isTypeSArray()); break; + case Tarray: visitDArray (mt.isTypeDArray()); break; + case Taarray: visitAArray (mt.isTypeAArray()); break; + case Tident: visitIdentifier(mt.isTypeIdentifier()); break; + case Tinstance: visitInstance (mt.isTypeInstance()); break; + case Ttypeof: visitTypeof (mt.isTypeTypeof()); break; + case Treturn: visitReturn (mt.isTypeReturn()); break; + case Tslice: visitSlice (mt.isTypeSlice()); break; + case Tmixin: visitMixin (mt.isTypeMixin()); break; + case Ttraits: visitTraits (mt.isTypeTraits()); break; } } @@ -4616,16 +4616,16 @@ Expression dotExp(Type mt, Scope* sc, Expression e, Identifier ident, int flag) switch (mt.ty) { - case Tvector: return visitVector (cast(TypeVector)mt); - case Tsarray: return visitSArray (cast(TypeSArray)mt); - case Tstruct: return visitStruct (cast(TypeStruct)mt); - case Tenum: return visitEnum (cast(TypeEnum)mt); - case Terror: return visitError (cast(TypeError)mt); - case Tarray: return visitDArray (cast(TypeDArray)mt); - case Taarray: return visitAArray (cast(TypeAArray)mt); - case Treference: return visitReference(cast(TypeReference)mt); - case Tdelegate: return visitDelegate (cast(TypeDelegate)mt); - case Tclass: return visitClass (cast(TypeClass)mt); + case Tvector: return visitVector (mt.isTypeVector()); + case Tsarray: return visitSArray (mt.isTypeSArray()); + case Tstruct: return visitStruct (mt.isTypeStruct()); + case Tenum: return visitEnum (mt.isTypeEnum()); + case Terror: return visitError (mt.isTypeError()); + case Tarray: return visitDArray (mt.isTypeDArray()); + case Taarray: return visitAArray (mt.isTypeAArray()); + case Treference: return visitReference(mt.isTypeReference()); + case Tdelegate: return visitDelegate (mt.isTypeDelegate()); + case Tclass: return visitClass (mt.isTypeClass()); default: return mt.isTypeBasic() ? 
visitBasic(cast(TypeBasic)mt) @@ -4786,12 +4786,12 @@ extern (C++) Expression defaultInit(Type mt, const ref Loc loc, const bool isCfi switch (mt.ty) { - case Tvector: return visitVector (cast(TypeVector)mt); - case Tsarray: return visitSArray (cast(TypeSArray)mt); - case Tfunction: return visitFunction(cast(TypeFunction)mt); - case Tstruct: return visitStruct (cast(TypeStruct)mt); - case Tenum: return visitEnum (cast(TypeEnum)mt); - case Ttuple: return visitTuple (cast(TypeTuple)mt); + case Tvector: return visitVector (mt.isTypeVector()); + case Tsarray: return visitSArray (mt.isTypeSArray()); + case Tfunction: return visitFunction(mt.isTypeFunction()); + case Tstruct: return visitStruct (mt.isTypeStruct()); + case Tenum: return visitEnum (mt.isTypeEnum()); + case Ttuple: return visitTuple (mt.isTypeTuple()); case Tnull: return new NullExp(Loc.initial, Type.tnull); @@ -4803,7 +4803,7 @@ extern (C++) Expression defaultInit(Type mt, const ref Loc loc, const bool isCfi case Treference: case Tdelegate: case Tclass: return new NullExp(loc, mt); - case Tnoreturn: return visitNoreturn(cast(TypeNoreturn) mt); + case Tnoreturn: return visitNoreturn(mt.isTypeNoreturn()); default: return mt.isTypeBasic() ? visitBasic(cast(TypeBasic)mt) : diff --git a/gcc/d/expr.cc b/gcc/d/expr.cc index f1c014d..665d122 100644 --- a/gcc/d/expr.cc +++ b/gcc/d/expr.cc @@ -1477,16 +1477,10 @@ public: t1 = build_address (t1); Type *tnext = tb1->isTypePointer ()->next->toBasetype (); + /* This case should have been rewritten to `_d_delstruct` in the + semantic phase. */ if (TypeStruct *ts = tnext->isTypeStruct ()) - { - if (ts->sym->dtor) - { - tree ti = build_typeinfo (e->loc, tnext); - this->result_ = build_libcall (LIBCALL_DELSTRUCT, Type::tvoid, - 2, t1, ti); - return; - } - } + gcc_assert (!ts->sym->dtor); /* Otherwise, the garbage collector is called to immediately free the memory allocated for the pointer. */ diff --git a/gcc/d/runtime.def b/gcc/d/runtime.def index 3961a1d..ef54a37 100644 --- a/gcc/d/runtime.def +++ b/gcc/d/runtime.def @@ -85,8 +85,6 @@ DEF_D_RUNTIME (NEWITEMIT, "_d_newitemiT", RT(VOIDPTR), P1(CONST_TYPEINFO), 0) /* Used when calling delete on a pointer. */ DEF_D_RUNTIME (DELMEMORY, "_d_delmemory", RT(VOID), P1(POINTER_VOIDPTR), 0) -DEF_D_RUNTIME (DELSTRUCT, "_d_delstruct", RT(VOID), - P2(POINTER_VOIDPTR, TYPEINFO), 0) /* Used when calling new on an array. The `i' variant is for when the initializer is nonzero, and the `m' variant is when initializing a diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi index cf508ff..6adc4e8 100644 --- a/gcc/doc/extend.texi +++ b/gcc/doc/extend.texi @@ -6850,12 +6850,12 @@ and SSE4.2). @item sse4.1 @itemx no-sse4.1 @cindex @code{target("sse4.1")} function attribute, x86 -Enable/disable the generation of the sse4.1 instructions. +Enable/disable the generation of the SSE4.1 instructions. @item sse4.2 @itemx no-sse4.2 @cindex @code{target("sse4.2")} function attribute, x86 -Enable/disable the generation of the sse4.2 instructions. +Enable/disable the generation of the SSE4.2 instructions. @item sse4a @itemx no-sse4a @@ -14586,34 +14586,34 @@ The following built-in functions are always available. They all generate the machine instruction that is part of the name. 
@smallexample -long __builtin_alpha_implver (void) -long __builtin_alpha_rpcc (void) -long __builtin_alpha_amask (long) -long __builtin_alpha_cmpbge (long, long) -long __builtin_alpha_extbl (long, long) -long __builtin_alpha_extwl (long, long) -long __builtin_alpha_extll (long, long) -long __builtin_alpha_extql (long, long) -long __builtin_alpha_extwh (long, long) -long __builtin_alpha_extlh (long, long) -long __builtin_alpha_extqh (long, long) -long __builtin_alpha_insbl (long, long) -long __builtin_alpha_inswl (long, long) -long __builtin_alpha_insll (long, long) -long __builtin_alpha_insql (long, long) -long __builtin_alpha_inswh (long, long) -long __builtin_alpha_inslh (long, long) -long __builtin_alpha_insqh (long, long) -long __builtin_alpha_mskbl (long, long) -long __builtin_alpha_mskwl (long, long) -long __builtin_alpha_mskll (long, long) -long __builtin_alpha_mskql (long, long) -long __builtin_alpha_mskwh (long, long) -long __builtin_alpha_msklh (long, long) -long __builtin_alpha_mskqh (long, long) -long __builtin_alpha_umulh (long, long) -long __builtin_alpha_zap (long, long) -long __builtin_alpha_zapnot (long, long) +long __builtin_alpha_implver (void); +long __builtin_alpha_rpcc (void); +long __builtin_alpha_amask (long); +long __builtin_alpha_cmpbge (long, long); +long __builtin_alpha_extbl (long, long); +long __builtin_alpha_extwl (long, long); +long __builtin_alpha_extll (long, long); +long __builtin_alpha_extql (long, long); +long __builtin_alpha_extwh (long, long); +long __builtin_alpha_extlh (long, long); +long __builtin_alpha_extqh (long, long); +long __builtin_alpha_insbl (long, long); +long __builtin_alpha_inswl (long, long); +long __builtin_alpha_insll (long, long); +long __builtin_alpha_insql (long, long); +long __builtin_alpha_inswh (long, long); +long __builtin_alpha_inslh (long, long); +long __builtin_alpha_insqh (long, long); +long __builtin_alpha_mskbl (long, long); +long __builtin_alpha_mskwl (long, long); +long __builtin_alpha_mskll (long, long); +long __builtin_alpha_mskql (long, long); +long __builtin_alpha_mskwh (long, long); +long __builtin_alpha_msklh (long, long); +long __builtin_alpha_mskqh (long, long); +long __builtin_alpha_umulh (long, long); +long __builtin_alpha_zap (long, long); +long __builtin_alpha_zapnot (long, long); @end smallexample The following built-in functions are always with @option{-mmax} @@ -14622,19 +14622,19 @@ later. They all generate the machine instruction that is part of the name. 
@smallexample -long __builtin_alpha_pklb (long) -long __builtin_alpha_pkwb (long) -long __builtin_alpha_unpkbl (long) -long __builtin_alpha_unpkbw (long) -long __builtin_alpha_minub8 (long, long) -long __builtin_alpha_minsb8 (long, long) -long __builtin_alpha_minuw4 (long, long) -long __builtin_alpha_minsw4 (long, long) -long __builtin_alpha_maxub8 (long, long) -long __builtin_alpha_maxsb8 (long, long) -long __builtin_alpha_maxuw4 (long, long) -long __builtin_alpha_maxsw4 (long, long) -long __builtin_alpha_perr (long, long) +long __builtin_alpha_pklb (long); +long __builtin_alpha_pkwb (long); +long __builtin_alpha_unpkbl (long); +long __builtin_alpha_unpkbw (long); +long __builtin_alpha_minub8 (long, long); +long __builtin_alpha_minsb8 (long, long); +long __builtin_alpha_minuw4 (long, long); +long __builtin_alpha_minsw4 (long, long); +long __builtin_alpha_maxub8 (long, long); +long __builtin_alpha_maxsb8 (long, long); +long __builtin_alpha_maxuw4 (long, long); +long __builtin_alpha_maxsw4 (long, long); +long __builtin_alpha_perr (long, long); @end smallexample The following built-in functions are always with @option{-mcix} @@ -14643,9 +14643,9 @@ later. They all generate the machine instruction that is part of the name. @smallexample -long __builtin_alpha_cttz (long) -long __builtin_alpha_ctlz (long) -long __builtin_alpha_ctpop (long) +long __builtin_alpha_cttz (long); +long __builtin_alpha_ctlz (long); +long __builtin_alpha_ctpop (long); @end smallexample The following built-in functions are available on systems that use the OSF/1 @@ -14654,8 +14654,8 @@ PAL calls, but when invoked with @option{-mtls-kernel}, they invoke @code{rdval} and @code{wrval}. @smallexample -void *__builtin_thread_pointer (void) -void __builtin_set_thread_pointer (void *) +void *__builtin_thread_pointer (void); +void __builtin_set_thread_pointer (void *); @end smallexample @node Altera Nios II Built-in Functions @@ -14668,26 +14668,26 @@ The following built-in functions are always available. They all generate the machine instruction that is part of the name. 
@example -int __builtin_ldbio (volatile const void *) -int __builtin_ldbuio (volatile const void *) -int __builtin_ldhio (volatile const void *) -int __builtin_ldhuio (volatile const void *) -int __builtin_ldwio (volatile const void *) -void __builtin_stbio (volatile void *, int) -void __builtin_sthio (volatile void *, int) -void __builtin_stwio (volatile void *, int) -void __builtin_sync (void) +int __builtin_ldbio (volatile const void *); +int __builtin_ldbuio (volatile const void *); +int __builtin_ldhio (volatile const void *); +int __builtin_ldhuio (volatile const void *); +int __builtin_ldwio (volatile const void *); +void __builtin_stbio (volatile void *, int); +void __builtin_sthio (volatile void *, int); +void __builtin_stwio (volatile void *, int); +void __builtin_sync (void); int __builtin_rdctl (int) -int __builtin_rdprs (int, int) -void __builtin_wrctl (int, int) -void __builtin_flushd (volatile void *) -void __builtin_flushda (volatile void *) +int __builtin_rdprs (int, int); +void __builtin_wrctl (int, int); +void __builtin_flushd (volatile void *); +void __builtin_flushda (volatile void *); int __builtin_wrpie (int); void __builtin_eni (int); -int __builtin_ldex (volatile const void *) -int __builtin_stex (volatile void *, int) -int __builtin_ldsex (volatile const void *) -int __builtin_stsex (volatile void *, int) +int __builtin_ldex (volatile const void *); +int __builtin_stex (volatile void *, int); +int __builtin_ldsex (volatile const void *); +int __builtin_stsex (volatile void *, int); @end example The following built-in functions are always available. They @@ -14717,58 +14717,58 @@ The letters represent the following data types: And the function names are: @example -void __builtin_custom_n (void) -void __builtin_custom_ni (int) -void __builtin_custom_nf (float) -void __builtin_custom_np (void *) -void __builtin_custom_nii (int, int) -void __builtin_custom_nif (int, float) -void __builtin_custom_nip (int, void *) -void __builtin_custom_nfi (float, int) -void __builtin_custom_nff (float, float) -void __builtin_custom_nfp (float, void *) -void __builtin_custom_npi (void *, int) -void __builtin_custom_npf (void *, float) -void __builtin_custom_npp (void *, void *) -int __builtin_custom_in (void) -int __builtin_custom_ini (int) -int __builtin_custom_inf (float) -int __builtin_custom_inp (void *) -int __builtin_custom_inii (int, int) -int __builtin_custom_inif (int, float) -int __builtin_custom_inip (int, void *) -int __builtin_custom_infi (float, int) -int __builtin_custom_inff (float, float) -int __builtin_custom_infp (float, void *) -int __builtin_custom_inpi (void *, int) -int __builtin_custom_inpf (void *, float) -int __builtin_custom_inpp (void *, void *) -float __builtin_custom_fn (void) -float __builtin_custom_fni (int) -float __builtin_custom_fnf (float) -float __builtin_custom_fnp (void *) -float __builtin_custom_fnii (int, int) -float __builtin_custom_fnif (int, float) -float __builtin_custom_fnip (int, void *) -float __builtin_custom_fnfi (float, int) -float __builtin_custom_fnff (float, float) -float __builtin_custom_fnfp (float, void *) -float __builtin_custom_fnpi (void *, int) -float __builtin_custom_fnpf (void *, float) -float __builtin_custom_fnpp (void *, void *) -void * __builtin_custom_pn (void) -void * __builtin_custom_pni (int) -void * __builtin_custom_pnf (float) -void * __builtin_custom_pnp (void *) -void * __builtin_custom_pnii (int, int) -void * __builtin_custom_pnif (int, float) -void * __builtin_custom_pnip (int, void *) -void * 
__builtin_custom_pnfi (float, int) -void * __builtin_custom_pnff (float, float) -void * __builtin_custom_pnfp (float, void *) -void * __builtin_custom_pnpi (void *, int) -void * __builtin_custom_pnpf (void *, float) -void * __builtin_custom_pnpp (void *, void *) +void __builtin_custom_n (void); +void __builtin_custom_ni (int); +void __builtin_custom_nf (float); +void __builtin_custom_np (void *); +void __builtin_custom_nii (int, int); +void __builtin_custom_nif (int, float); +void __builtin_custom_nip (int, void *); +void __builtin_custom_nfi (float, int); +void __builtin_custom_nff (float, float); +void __builtin_custom_nfp (float, void *); +void __builtin_custom_npi (void *, int); +void __builtin_custom_npf (void *, float); +void __builtin_custom_npp (void *, void *); +int __builtin_custom_in (void); +int __builtin_custom_ini (int); +int __builtin_custom_inf (float); +int __builtin_custom_inp (void *); +int __builtin_custom_inii (int, int); +int __builtin_custom_inif (int, float); +int __builtin_custom_inip (int, void *); +int __builtin_custom_infi (float, int); +int __builtin_custom_inff (float, float); +int __builtin_custom_infp (float, void *); +int __builtin_custom_inpi (void *, int); +int __builtin_custom_inpf (void *, float); +int __builtin_custom_inpp (void *, void *); +float __builtin_custom_fn (void); +float __builtin_custom_fni (int); +float __builtin_custom_fnf (float); +float __builtin_custom_fnp (void *); +float __builtin_custom_fnii (int, int); +float __builtin_custom_fnif (int, float); +float __builtin_custom_fnip (int, void *); +float __builtin_custom_fnfi (float, int); +float __builtin_custom_fnff (float, float); +float __builtin_custom_fnfp (float, void *); +float __builtin_custom_fnpi (void *, int); +float __builtin_custom_fnpf (void *, float); +float __builtin_custom_fnpp (void *, void *); +void * __builtin_custom_pn (void); +void * __builtin_custom_pni (int); +void * __builtin_custom_pnf (float); +void * __builtin_custom_pnp (void *); +void * __builtin_custom_pnii (int, int); +void * __builtin_custom_pnif (int, float); +void * __builtin_custom_pnip (int, void *); +void * __builtin_custom_pnfi (float, int); +void * __builtin_custom_pnff (float, float); +void * __builtin_custom_pnfp (float, void *); +void * __builtin_custom_pnpi (void *, int); +void * __builtin_custom_pnpf (void *, float); +void * __builtin_custom_pnpp (void *, void *); @end example @node ARC Built-in Functions @@ -15003,147 +15003,147 @@ for ARC, grouped by calling signature. 
The following take two @code{__v8hi} arguments and return a @code{__v8hi} result: @example -__v8hi __builtin_arc_vaddaw (__v8hi, __v8hi) -__v8hi __builtin_arc_vaddw (__v8hi, __v8hi) -__v8hi __builtin_arc_vand (__v8hi, __v8hi) -__v8hi __builtin_arc_vandaw (__v8hi, __v8hi) -__v8hi __builtin_arc_vavb (__v8hi, __v8hi) -__v8hi __builtin_arc_vavrb (__v8hi, __v8hi) -__v8hi __builtin_arc_vbic (__v8hi, __v8hi) -__v8hi __builtin_arc_vbicaw (__v8hi, __v8hi) -__v8hi __builtin_arc_vdifaw (__v8hi, __v8hi) -__v8hi __builtin_arc_vdifw (__v8hi, __v8hi) -__v8hi __builtin_arc_veqw (__v8hi, __v8hi) -__v8hi __builtin_arc_vh264f (__v8hi, __v8hi) -__v8hi __builtin_arc_vh264ft (__v8hi, __v8hi) -__v8hi __builtin_arc_vh264fw (__v8hi, __v8hi) -__v8hi __builtin_arc_vlew (__v8hi, __v8hi) -__v8hi __builtin_arc_vltw (__v8hi, __v8hi) -__v8hi __builtin_arc_vmaxaw (__v8hi, __v8hi) -__v8hi __builtin_arc_vmaxw (__v8hi, __v8hi) -__v8hi __builtin_arc_vminaw (__v8hi, __v8hi) -__v8hi __builtin_arc_vminw (__v8hi, __v8hi) -__v8hi __builtin_arc_vmr1aw (__v8hi, __v8hi) -__v8hi __builtin_arc_vmr1w (__v8hi, __v8hi) -__v8hi __builtin_arc_vmr2aw (__v8hi, __v8hi) -__v8hi __builtin_arc_vmr2w (__v8hi, __v8hi) -__v8hi __builtin_arc_vmr3aw (__v8hi, __v8hi) -__v8hi __builtin_arc_vmr3w (__v8hi, __v8hi) -__v8hi __builtin_arc_vmr4aw (__v8hi, __v8hi) -__v8hi __builtin_arc_vmr4w (__v8hi, __v8hi) -__v8hi __builtin_arc_vmr5aw (__v8hi, __v8hi) -__v8hi __builtin_arc_vmr5w (__v8hi, __v8hi) -__v8hi __builtin_arc_vmr6aw (__v8hi, __v8hi) -__v8hi __builtin_arc_vmr6w (__v8hi, __v8hi) -__v8hi __builtin_arc_vmr7aw (__v8hi, __v8hi) -__v8hi __builtin_arc_vmr7w (__v8hi, __v8hi) -__v8hi __builtin_arc_vmrb (__v8hi, __v8hi) -__v8hi __builtin_arc_vmulaw (__v8hi, __v8hi) -__v8hi __builtin_arc_vmulfaw (__v8hi, __v8hi) -__v8hi __builtin_arc_vmulfw (__v8hi, __v8hi) -__v8hi __builtin_arc_vmulw (__v8hi, __v8hi) -__v8hi __builtin_arc_vnew (__v8hi, __v8hi) -__v8hi __builtin_arc_vor (__v8hi, __v8hi) -__v8hi __builtin_arc_vsubaw (__v8hi, __v8hi) -__v8hi __builtin_arc_vsubw (__v8hi, __v8hi) -__v8hi __builtin_arc_vsummw (__v8hi, __v8hi) -__v8hi __builtin_arc_vvc1f (__v8hi, __v8hi) -__v8hi __builtin_arc_vvc1ft (__v8hi, __v8hi) -__v8hi __builtin_arc_vxor (__v8hi, __v8hi) -__v8hi __builtin_arc_vxoraw (__v8hi, __v8hi) +__v8hi __builtin_arc_vaddaw (__v8hi, __v8hi); +__v8hi __builtin_arc_vaddw (__v8hi, __v8hi); +__v8hi __builtin_arc_vand (__v8hi, __v8hi); +__v8hi __builtin_arc_vandaw (__v8hi, __v8hi); +__v8hi __builtin_arc_vavb (__v8hi, __v8hi); +__v8hi __builtin_arc_vavrb (__v8hi, __v8hi); +__v8hi __builtin_arc_vbic (__v8hi, __v8hi); +__v8hi __builtin_arc_vbicaw (__v8hi, __v8hi); +__v8hi __builtin_arc_vdifaw (__v8hi, __v8hi); +__v8hi __builtin_arc_vdifw (__v8hi, __v8hi); +__v8hi __builtin_arc_veqw (__v8hi, __v8hi); +__v8hi __builtin_arc_vh264f (__v8hi, __v8hi); +__v8hi __builtin_arc_vh264ft (__v8hi, __v8hi); +__v8hi __builtin_arc_vh264fw (__v8hi, __v8hi); +__v8hi __builtin_arc_vlew (__v8hi, __v8hi); +__v8hi __builtin_arc_vltw (__v8hi, __v8hi); +__v8hi __builtin_arc_vmaxaw (__v8hi, __v8hi); +__v8hi __builtin_arc_vmaxw (__v8hi, __v8hi); +__v8hi __builtin_arc_vminaw (__v8hi, __v8hi); +__v8hi __builtin_arc_vminw (__v8hi, __v8hi); +__v8hi __builtin_arc_vmr1aw (__v8hi, __v8hi); +__v8hi __builtin_arc_vmr1w (__v8hi, __v8hi); +__v8hi __builtin_arc_vmr2aw (__v8hi, __v8hi); +__v8hi __builtin_arc_vmr2w (__v8hi, __v8hi); +__v8hi __builtin_arc_vmr3aw (__v8hi, __v8hi); +__v8hi __builtin_arc_vmr3w (__v8hi, __v8hi); +__v8hi __builtin_arc_vmr4aw (__v8hi, __v8hi); +__v8hi __builtin_arc_vmr4w (__v8hi, 
__v8hi); +__v8hi __builtin_arc_vmr5aw (__v8hi, __v8hi); +__v8hi __builtin_arc_vmr5w (__v8hi, __v8hi); +__v8hi __builtin_arc_vmr6aw (__v8hi, __v8hi); +__v8hi __builtin_arc_vmr6w (__v8hi, __v8hi); +__v8hi __builtin_arc_vmr7aw (__v8hi, __v8hi); +__v8hi __builtin_arc_vmr7w (__v8hi, __v8hi); +__v8hi __builtin_arc_vmrb (__v8hi, __v8hi); +__v8hi __builtin_arc_vmulaw (__v8hi, __v8hi); +__v8hi __builtin_arc_vmulfaw (__v8hi, __v8hi); +__v8hi __builtin_arc_vmulfw (__v8hi, __v8hi); +__v8hi __builtin_arc_vmulw (__v8hi, __v8hi); +__v8hi __builtin_arc_vnew (__v8hi, __v8hi); +__v8hi __builtin_arc_vor (__v8hi, __v8hi); +__v8hi __builtin_arc_vsubaw (__v8hi, __v8hi); +__v8hi __builtin_arc_vsubw (__v8hi, __v8hi); +__v8hi __builtin_arc_vsummw (__v8hi, __v8hi); +__v8hi __builtin_arc_vvc1f (__v8hi, __v8hi); +__v8hi __builtin_arc_vvc1ft (__v8hi, __v8hi); +__v8hi __builtin_arc_vxor (__v8hi, __v8hi); +__v8hi __builtin_arc_vxoraw (__v8hi, __v8hi); @end example The following take one @code{__v8hi} and one @code{int} argument and return a @code{__v8hi} result: @example -__v8hi __builtin_arc_vbaddw (__v8hi, int) -__v8hi __builtin_arc_vbmaxw (__v8hi, int) -__v8hi __builtin_arc_vbminw (__v8hi, int) -__v8hi __builtin_arc_vbmulaw (__v8hi, int) -__v8hi __builtin_arc_vbmulfw (__v8hi, int) -__v8hi __builtin_arc_vbmulw (__v8hi, int) -__v8hi __builtin_arc_vbrsubw (__v8hi, int) -__v8hi __builtin_arc_vbsubw (__v8hi, int) +__v8hi __builtin_arc_vbaddw (__v8hi, int); +__v8hi __builtin_arc_vbmaxw (__v8hi, int); +__v8hi __builtin_arc_vbminw (__v8hi, int); +__v8hi __builtin_arc_vbmulaw (__v8hi, int); +__v8hi __builtin_arc_vbmulfw (__v8hi, int); +__v8hi __builtin_arc_vbmulw (__v8hi, int); +__v8hi __builtin_arc_vbrsubw (__v8hi, int); +__v8hi __builtin_arc_vbsubw (__v8hi, int); @end example The following take one @code{__v8hi} argument and one @code{int} argument which must be a 3-bit compile time constant indicating a register number I0-I7. They return a @code{__v8hi} result. @example -__v8hi __builtin_arc_vasrw (__v8hi, const int) -__v8hi __builtin_arc_vsr8 (__v8hi, const int) -__v8hi __builtin_arc_vsr8aw (__v8hi, const int) +__v8hi __builtin_arc_vasrw (__v8hi, const int); +__v8hi __builtin_arc_vsr8 (__v8hi, const int); +__v8hi __builtin_arc_vsr8aw (__v8hi, const int); @end example The following take one @code{__v8hi} argument and one @code{int} argument which must be a 6-bit compile time constant. They return a @code{__v8hi} result. @example -__v8hi __builtin_arc_vasrpwbi (__v8hi, const int) -__v8hi __builtin_arc_vasrrpwbi (__v8hi, const int) -__v8hi __builtin_arc_vasrrwi (__v8hi, const int) -__v8hi __builtin_arc_vasrsrwi (__v8hi, const int) -__v8hi __builtin_arc_vasrwi (__v8hi, const int) -__v8hi __builtin_arc_vsr8awi (__v8hi, const int) -__v8hi __builtin_arc_vsr8i (__v8hi, const int) +__v8hi __builtin_arc_vasrpwbi (__v8hi, const int); +__v8hi __builtin_arc_vasrrpwbi (__v8hi, const int); +__v8hi __builtin_arc_vasrrwi (__v8hi, const int); +__v8hi __builtin_arc_vasrsrwi (__v8hi, const int); +__v8hi __builtin_arc_vasrwi (__v8hi, const int); +__v8hi __builtin_arc_vsr8awi (__v8hi, const int); +__v8hi __builtin_arc_vsr8i (__v8hi, const int); @end example The following take one @code{__v8hi} argument and one @code{int} argument which must be a 8-bit compile time constant. They return a @code{__v8hi} result. 
@example -__v8hi __builtin_arc_vd6tapf (__v8hi, const int) -__v8hi __builtin_arc_vmvaw (__v8hi, const int) -__v8hi __builtin_arc_vmvw (__v8hi, const int) -__v8hi __builtin_arc_vmvzw (__v8hi, const int) +__v8hi __builtin_arc_vd6tapf (__v8hi, const int); +__v8hi __builtin_arc_vmvaw (__v8hi, const int); +__v8hi __builtin_arc_vmvw (__v8hi, const int); +__v8hi __builtin_arc_vmvzw (__v8hi, const int); @end example The following take two @code{int} arguments, the second of which which must be a 8-bit compile time constant. They return a @code{__v8hi} result: @example -__v8hi __builtin_arc_vmovaw (int, const int) -__v8hi __builtin_arc_vmovw (int, const int) -__v8hi __builtin_arc_vmovzw (int, const int) +__v8hi __builtin_arc_vmovaw (int, const int); +__v8hi __builtin_arc_vmovw (int, const int); +__v8hi __builtin_arc_vmovzw (int, const int); @end example The following take a single @code{__v8hi} argument and return a @code{__v8hi} result: @example -__v8hi __builtin_arc_vabsaw (__v8hi) -__v8hi __builtin_arc_vabsw (__v8hi) -__v8hi __builtin_arc_vaddsuw (__v8hi) -__v8hi __builtin_arc_vexch1 (__v8hi) -__v8hi __builtin_arc_vexch2 (__v8hi) -__v8hi __builtin_arc_vexch4 (__v8hi) -__v8hi __builtin_arc_vsignw (__v8hi) -__v8hi __builtin_arc_vupbaw (__v8hi) -__v8hi __builtin_arc_vupbw (__v8hi) -__v8hi __builtin_arc_vupsbaw (__v8hi) -__v8hi __builtin_arc_vupsbw (__v8hi) +__v8hi __builtin_arc_vabsaw (__v8hi); +__v8hi __builtin_arc_vabsw (__v8hi); +__v8hi __builtin_arc_vaddsuw (__v8hi); +__v8hi __builtin_arc_vexch1 (__v8hi); +__v8hi __builtin_arc_vexch2 (__v8hi); +__v8hi __builtin_arc_vexch4 (__v8hi); +__v8hi __builtin_arc_vsignw (__v8hi); +__v8hi __builtin_arc_vupbaw (__v8hi); +__v8hi __builtin_arc_vupbw (__v8hi); +__v8hi __builtin_arc_vupsbaw (__v8hi); +__v8hi __builtin_arc_vupsbw (__v8hi); @end example The following take two @code{int} arguments and return no result: @example -void __builtin_arc_vdirun (int, int) -void __builtin_arc_vdorun (int, int) +void __builtin_arc_vdirun (int, int); +void __builtin_arc_vdorun (int, int); @end example The following take two @code{int} arguments and return no result. The first argument must a 3-bit compile time constant indicating one of the DR0-DR7 DMA setup channels: @example -void __builtin_arc_vdiwr (const int, int) -void __builtin_arc_vdowr (const int, int) +void __builtin_arc_vdiwr (const int, int); +void __builtin_arc_vdowr (const int, int); @end example The following take an @code{int} argument and return no result: @example -void __builtin_arc_vendrec (int) -void __builtin_arc_vrec (int) -void __builtin_arc_vrecrun (int) -void __builtin_arc_vrun (int) +void __builtin_arc_vendrec (int); +void __builtin_arc_vrec (int); +void __builtin_arc_vrecrun (int); +void __builtin_arc_vrun (int); @end example The following take a @code{__v8hi} argument and two @code{int} @@ -15157,10 +15157,10 @@ bits of the @code{__v8hi} register provided as the first argument with the value loaded from the @code{[Ib, u8]} location in the SDM. 
@example -__v8hi __builtin_arc_vld32 (__v8hi, const int, const int) -__v8hi __builtin_arc_vld32wh (__v8hi, const int, const int) -__v8hi __builtin_arc_vld32wl (__v8hi, const int, const int) -__v8hi __builtin_arc_vld64 (__v8hi, const int, const int) +__v8hi __builtin_arc_vld32 (__v8hi, const int, const int); +__v8hi __builtin_arc_vld32wh (__v8hi, const int, const int); +__v8hi __builtin_arc_vld32wl (__v8hi, const int, const int); +__v8hi __builtin_arc_vld64 (__v8hi, const int, const int); @end example The following take two @code{int} arguments and return a @code{__v8hi} @@ -15169,8 +15169,8 @@ indicating one the registers I0-I7, and the second argument must be an 8-bit compile time constant. @example -__v8hi __builtin_arc_vld128 (const int, const int) -__v8hi __builtin_arc_vld64w (const int, const int) +__v8hi __builtin_arc_vld128 (const int, const int); +__v8hi __builtin_arc_vld64w (const int, const int); @end example The following take a @code{__v8hi} argument and two @code{int} @@ -15179,8 +15179,8 @@ compile time constants, indicating one the registers I0-I7, and the third argument must be an 8-bit compile time constant. @example -void __builtin_arc_vst128 (__v8hi, const int, const int) -void __builtin_arc_vst64 (__v8hi, const int, const int) +void __builtin_arc_vst128 (__v8hi, const int, const int); +void __builtin_arc_vst64 (__v8hi, const int, const int); @end example The following take a @code{__v8hi} argument and three @code{int} @@ -15191,8 +15191,8 @@ indicating one the registers I0-I7, and the fourth argument must be an 8-bit compile time constant. @example -void __builtin_arc_vst16_n (__v8hi, const int, const int, const int) -void __builtin_arc_vst32_n (__v8hi, const int, const int, const int) +void __builtin_arc_vst16_n (__v8hi, const int, const int, const int); +void __builtin_arc_vst32_n (__v8hi, const int, const int, const int); @end example @node ARM iWMMXt Built-in Functions @@ -15206,146 +15206,146 @@ typedef int v2si __attribute__ ((vector_size (8))); typedef short v4hi __attribute__ ((vector_size (8))); typedef char v8qi __attribute__ ((vector_size (8))); -int __builtin_arm_getwcgr0 (void) -void __builtin_arm_setwcgr0 (int) -int __builtin_arm_getwcgr1 (void) -void __builtin_arm_setwcgr1 (int) -int __builtin_arm_getwcgr2 (void) -void __builtin_arm_setwcgr2 (int) -int __builtin_arm_getwcgr3 (void) -void __builtin_arm_setwcgr3 (int) -int __builtin_arm_textrmsb (v8qi, int) -int __builtin_arm_textrmsh (v4hi, int) -int __builtin_arm_textrmsw (v2si, int) -int __builtin_arm_textrmub (v8qi, int) -int __builtin_arm_textrmuh (v4hi, int) -int __builtin_arm_textrmuw (v2si, int) -v8qi __builtin_arm_tinsrb (v8qi, int, int) -v4hi __builtin_arm_tinsrh (v4hi, int, int) -v2si __builtin_arm_tinsrw (v2si, int, int) -long long __builtin_arm_tmia (long long, int, int) -long long __builtin_arm_tmiabb (long long, int, int) -long long __builtin_arm_tmiabt (long long, int, int) -long long __builtin_arm_tmiaph (long long, int, int) -long long __builtin_arm_tmiatb (long long, int, int) -long long __builtin_arm_tmiatt (long long, int, int) -int __builtin_arm_tmovmskb (v8qi) -int __builtin_arm_tmovmskh (v4hi) -int __builtin_arm_tmovmskw (v2si) -long long __builtin_arm_waccb (v8qi) -long long __builtin_arm_wacch (v4hi) -long long __builtin_arm_waccw (v2si) -v8qi __builtin_arm_waddb (v8qi, v8qi) -v8qi __builtin_arm_waddbss (v8qi, v8qi) -v8qi __builtin_arm_waddbus (v8qi, v8qi) -v4hi __builtin_arm_waddh (v4hi, v4hi) -v4hi __builtin_arm_waddhss (v4hi, v4hi) -v4hi __builtin_arm_waddhus (v4hi, v4hi) -v2si 
__builtin_arm_waddw (v2si, v2si) -v2si __builtin_arm_waddwss (v2si, v2si) -v2si __builtin_arm_waddwus (v2si, v2si) -v8qi __builtin_arm_walign (v8qi, v8qi, int) -long long __builtin_arm_wand(long long, long long) -long long __builtin_arm_wandn (long long, long long) -v8qi __builtin_arm_wavg2b (v8qi, v8qi) -v8qi __builtin_arm_wavg2br (v8qi, v8qi) -v4hi __builtin_arm_wavg2h (v4hi, v4hi) -v4hi __builtin_arm_wavg2hr (v4hi, v4hi) -v8qi __builtin_arm_wcmpeqb (v8qi, v8qi) -v4hi __builtin_arm_wcmpeqh (v4hi, v4hi) -v2si __builtin_arm_wcmpeqw (v2si, v2si) -v8qi __builtin_arm_wcmpgtsb (v8qi, v8qi) -v4hi __builtin_arm_wcmpgtsh (v4hi, v4hi) -v2si __builtin_arm_wcmpgtsw (v2si, v2si) -v8qi __builtin_arm_wcmpgtub (v8qi, v8qi) -v4hi __builtin_arm_wcmpgtuh (v4hi, v4hi) -v2si __builtin_arm_wcmpgtuw (v2si, v2si) -long long __builtin_arm_wmacs (long long, v4hi, v4hi) -long long __builtin_arm_wmacsz (v4hi, v4hi) -long long __builtin_arm_wmacu (long long, v4hi, v4hi) -long long __builtin_arm_wmacuz (v4hi, v4hi) -v4hi __builtin_arm_wmadds (v4hi, v4hi) -v4hi __builtin_arm_wmaddu (v4hi, v4hi) -v8qi __builtin_arm_wmaxsb (v8qi, v8qi) -v4hi __builtin_arm_wmaxsh (v4hi, v4hi) -v2si __builtin_arm_wmaxsw (v2si, v2si) -v8qi __builtin_arm_wmaxub (v8qi, v8qi) -v4hi __builtin_arm_wmaxuh (v4hi, v4hi) -v2si __builtin_arm_wmaxuw (v2si, v2si) -v8qi __builtin_arm_wminsb (v8qi, v8qi) -v4hi __builtin_arm_wminsh (v4hi, v4hi) -v2si __builtin_arm_wminsw (v2si, v2si) -v8qi __builtin_arm_wminub (v8qi, v8qi) -v4hi __builtin_arm_wminuh (v4hi, v4hi) -v2si __builtin_arm_wminuw (v2si, v2si) -v4hi __builtin_arm_wmulsm (v4hi, v4hi) -v4hi __builtin_arm_wmulul (v4hi, v4hi) -v4hi __builtin_arm_wmulum (v4hi, v4hi) -long long __builtin_arm_wor (long long, long long) -v2si __builtin_arm_wpackdss (long long, long long) -v2si __builtin_arm_wpackdus (long long, long long) -v8qi __builtin_arm_wpackhss (v4hi, v4hi) -v8qi __builtin_arm_wpackhus (v4hi, v4hi) -v4hi __builtin_arm_wpackwss (v2si, v2si) -v4hi __builtin_arm_wpackwus (v2si, v2si) -long long __builtin_arm_wrord (long long, long long) -long long __builtin_arm_wrordi (long long, int) -v4hi __builtin_arm_wrorh (v4hi, long long) -v4hi __builtin_arm_wrorhi (v4hi, int) -v2si __builtin_arm_wrorw (v2si, long long) -v2si __builtin_arm_wrorwi (v2si, int) -v2si __builtin_arm_wsadb (v2si, v8qi, v8qi) -v2si __builtin_arm_wsadbz (v8qi, v8qi) -v2si __builtin_arm_wsadh (v2si, v4hi, v4hi) -v2si __builtin_arm_wsadhz (v4hi, v4hi) -v4hi __builtin_arm_wshufh (v4hi, int) -long long __builtin_arm_wslld (long long, long long) -long long __builtin_arm_wslldi (long long, int) -v4hi __builtin_arm_wsllh (v4hi, long long) -v4hi __builtin_arm_wsllhi (v4hi, int) -v2si __builtin_arm_wsllw (v2si, long long) -v2si __builtin_arm_wsllwi (v2si, int) -long long __builtin_arm_wsrad (long long, long long) -long long __builtin_arm_wsradi (long long, int) -v4hi __builtin_arm_wsrah (v4hi, long long) -v4hi __builtin_arm_wsrahi (v4hi, int) -v2si __builtin_arm_wsraw (v2si, long long) -v2si __builtin_arm_wsrawi (v2si, int) -long long __builtin_arm_wsrld (long long, long long) -long long __builtin_arm_wsrldi (long long, int) -v4hi __builtin_arm_wsrlh (v4hi, long long) -v4hi __builtin_arm_wsrlhi (v4hi, int) -v2si __builtin_arm_wsrlw (v2si, long long) -v2si __builtin_arm_wsrlwi (v2si, int) -v8qi __builtin_arm_wsubb (v8qi, v8qi) -v8qi __builtin_arm_wsubbss (v8qi, v8qi) -v8qi __builtin_arm_wsubbus (v8qi, v8qi) -v4hi __builtin_arm_wsubh (v4hi, v4hi) -v4hi __builtin_arm_wsubhss (v4hi, v4hi) -v4hi __builtin_arm_wsubhus (v4hi, v4hi) -v2si 
__builtin_arm_wsubw (v2si, v2si) -v2si __builtin_arm_wsubwss (v2si, v2si) -v2si __builtin_arm_wsubwus (v2si, v2si) -v4hi __builtin_arm_wunpckehsb (v8qi) -v2si __builtin_arm_wunpckehsh (v4hi) -long long __builtin_arm_wunpckehsw (v2si) -v4hi __builtin_arm_wunpckehub (v8qi) -v2si __builtin_arm_wunpckehuh (v4hi) -long long __builtin_arm_wunpckehuw (v2si) -v4hi __builtin_arm_wunpckelsb (v8qi) -v2si __builtin_arm_wunpckelsh (v4hi) -long long __builtin_arm_wunpckelsw (v2si) -v4hi __builtin_arm_wunpckelub (v8qi) -v2si __builtin_arm_wunpckeluh (v4hi) -long long __builtin_arm_wunpckeluw (v2si) -v8qi __builtin_arm_wunpckihb (v8qi, v8qi) -v4hi __builtin_arm_wunpckihh (v4hi, v4hi) -v2si __builtin_arm_wunpckihw (v2si, v2si) -v8qi __builtin_arm_wunpckilb (v8qi, v8qi) -v4hi __builtin_arm_wunpckilh (v4hi, v4hi) -v2si __builtin_arm_wunpckilw (v2si, v2si) -long long __builtin_arm_wxor (long long, long long) -long long __builtin_arm_wzero () +int __builtin_arm_getwcgr0 (void); +void __builtin_arm_setwcgr0 (int); +int __builtin_arm_getwcgr1 (void); +void __builtin_arm_setwcgr1 (int); +int __builtin_arm_getwcgr2 (void); +void __builtin_arm_setwcgr2 (int); +int __builtin_arm_getwcgr3 (void); +void __builtin_arm_setwcgr3 (int); +int __builtin_arm_textrmsb (v8qi, int); +int __builtin_arm_textrmsh (v4hi, int); +int __builtin_arm_textrmsw (v2si, int); +int __builtin_arm_textrmub (v8qi, int); +int __builtin_arm_textrmuh (v4hi, int); +int __builtin_arm_textrmuw (v2si, int); +v8qi __builtin_arm_tinsrb (v8qi, int, int); +v4hi __builtin_arm_tinsrh (v4hi, int, int); +v2si __builtin_arm_tinsrw (v2si, int, int); +long long __builtin_arm_tmia (long long, int, int); +long long __builtin_arm_tmiabb (long long, int, int); +long long __builtin_arm_tmiabt (long long, int, int); +long long __builtin_arm_tmiaph (long long, int, int); +long long __builtin_arm_tmiatb (long long, int, int); +long long __builtin_arm_tmiatt (long long, int, int); +int __builtin_arm_tmovmskb (v8qi); +int __builtin_arm_tmovmskh (v4hi); +int __builtin_arm_tmovmskw (v2si); +long long __builtin_arm_waccb (v8qi); +long long __builtin_arm_wacch (v4hi); +long long __builtin_arm_waccw (v2si); +v8qi __builtin_arm_waddb (v8qi, v8qi); +v8qi __builtin_arm_waddbss (v8qi, v8qi); +v8qi __builtin_arm_waddbus (v8qi, v8qi); +v4hi __builtin_arm_waddh (v4hi, v4hi); +v4hi __builtin_arm_waddhss (v4hi, v4hi); +v4hi __builtin_arm_waddhus (v4hi, v4hi); +v2si __builtin_arm_waddw (v2si, v2si); +v2si __builtin_arm_waddwss (v2si, v2si); +v2si __builtin_arm_waddwus (v2si, v2si); +v8qi __builtin_arm_walign (v8qi, v8qi, int); +long long __builtin_arm_wand(long long, long long); +long long __builtin_arm_wandn (long long, long long); +v8qi __builtin_arm_wavg2b (v8qi, v8qi); +v8qi __builtin_arm_wavg2br (v8qi, v8qi); +v4hi __builtin_arm_wavg2h (v4hi, v4hi); +v4hi __builtin_arm_wavg2hr (v4hi, v4hi); +v8qi __builtin_arm_wcmpeqb (v8qi, v8qi); +v4hi __builtin_arm_wcmpeqh (v4hi, v4hi); +v2si __builtin_arm_wcmpeqw (v2si, v2si); +v8qi __builtin_arm_wcmpgtsb (v8qi, v8qi); +v4hi __builtin_arm_wcmpgtsh (v4hi, v4hi); +v2si __builtin_arm_wcmpgtsw (v2si, v2si); +v8qi __builtin_arm_wcmpgtub (v8qi, v8qi); +v4hi __builtin_arm_wcmpgtuh (v4hi, v4hi); +v2si __builtin_arm_wcmpgtuw (v2si, v2si); +long long __builtin_arm_wmacs (long long, v4hi, v4hi); +long long __builtin_arm_wmacsz (v4hi, v4hi); +long long __builtin_arm_wmacu (long long, v4hi, v4hi); +long long __builtin_arm_wmacuz (v4hi, v4hi); +v4hi __builtin_arm_wmadds (v4hi, v4hi); +v4hi __builtin_arm_wmaddu (v4hi, v4hi); +v8qi __builtin_arm_wmaxsb (v8qi, 
v8qi); +v4hi __builtin_arm_wmaxsh (v4hi, v4hi); +v2si __builtin_arm_wmaxsw (v2si, v2si); +v8qi __builtin_arm_wmaxub (v8qi, v8qi); +v4hi __builtin_arm_wmaxuh (v4hi, v4hi); +v2si __builtin_arm_wmaxuw (v2si, v2si); +v8qi __builtin_arm_wminsb (v8qi, v8qi); +v4hi __builtin_arm_wminsh (v4hi, v4hi); +v2si __builtin_arm_wminsw (v2si, v2si); +v8qi __builtin_arm_wminub (v8qi, v8qi); +v4hi __builtin_arm_wminuh (v4hi, v4hi); +v2si __builtin_arm_wminuw (v2si, v2si); +v4hi __builtin_arm_wmulsm (v4hi, v4hi); +v4hi __builtin_arm_wmulul (v4hi, v4hi); +v4hi __builtin_arm_wmulum (v4hi, v4hi); +long long __builtin_arm_wor (long long, long long); +v2si __builtin_arm_wpackdss (long long, long long); +v2si __builtin_arm_wpackdus (long long, long long); +v8qi __builtin_arm_wpackhss (v4hi, v4hi); +v8qi __builtin_arm_wpackhus (v4hi, v4hi); +v4hi __builtin_arm_wpackwss (v2si, v2si); +v4hi __builtin_arm_wpackwus (v2si, v2si); +long long __builtin_arm_wrord (long long, long long); +long long __builtin_arm_wrordi (long long, int); +v4hi __builtin_arm_wrorh (v4hi, long long); +v4hi __builtin_arm_wrorhi (v4hi, int); +v2si __builtin_arm_wrorw (v2si, long long); +v2si __builtin_arm_wrorwi (v2si, int); +v2si __builtin_arm_wsadb (v2si, v8qi, v8qi); +v2si __builtin_arm_wsadbz (v8qi, v8qi); +v2si __builtin_arm_wsadh (v2si, v4hi, v4hi); +v2si __builtin_arm_wsadhz (v4hi, v4hi); +v4hi __builtin_arm_wshufh (v4hi, int); +long long __builtin_arm_wslld (long long, long long); +long long __builtin_arm_wslldi (long long, int); +v4hi __builtin_arm_wsllh (v4hi, long long); +v4hi __builtin_arm_wsllhi (v4hi, int); +v2si __builtin_arm_wsllw (v2si, long long); +v2si __builtin_arm_wsllwi (v2si, int); +long long __builtin_arm_wsrad (long long, long long); +long long __builtin_arm_wsradi (long long, int); +v4hi __builtin_arm_wsrah (v4hi, long long); +v4hi __builtin_arm_wsrahi (v4hi, int); +v2si __builtin_arm_wsraw (v2si, long long); +v2si __builtin_arm_wsrawi (v2si, int); +long long __builtin_arm_wsrld (long long, long long); +long long __builtin_arm_wsrldi (long long, int); +v4hi __builtin_arm_wsrlh (v4hi, long long); +v4hi __builtin_arm_wsrlhi (v4hi, int); +v2si __builtin_arm_wsrlw (v2si, long long); +v2si __builtin_arm_wsrlwi (v2si, int); +v8qi __builtin_arm_wsubb (v8qi, v8qi); +v8qi __builtin_arm_wsubbss (v8qi, v8qi); +v8qi __builtin_arm_wsubbus (v8qi, v8qi); +v4hi __builtin_arm_wsubh (v4hi, v4hi); +v4hi __builtin_arm_wsubhss (v4hi, v4hi); +v4hi __builtin_arm_wsubhus (v4hi, v4hi); +v2si __builtin_arm_wsubw (v2si, v2si); +v2si __builtin_arm_wsubwss (v2si, v2si); +v2si __builtin_arm_wsubwus (v2si, v2si); +v4hi __builtin_arm_wunpckehsb (v8qi); +v2si __builtin_arm_wunpckehsh (v4hi); +long long __builtin_arm_wunpckehsw (v2si); +v4hi __builtin_arm_wunpckehub (v8qi); +v2si __builtin_arm_wunpckehuh (v4hi); +long long __builtin_arm_wunpckehuw (v2si); +v4hi __builtin_arm_wunpckelsb (v8qi); +v2si __builtin_arm_wunpckelsh (v4hi); +long long __builtin_arm_wunpckelsw (v2si); +v4hi __builtin_arm_wunpckelub (v8qi); +v2si __builtin_arm_wunpckeluh (v4hi); +long long __builtin_arm_wunpckeluw (v2si); +v8qi __builtin_arm_wunpckihb (v8qi, v8qi); +v4hi __builtin_arm_wunpckihh (v4hi, v4hi); +v2si __builtin_arm_wunpckihw (v2si, v2si); +v8qi __builtin_arm_wunpckilb (v8qi, v8qi); +v4hi __builtin_arm_wunpckilh (v4hi, v4hi); +v2si __builtin_arm_wunpckilw (v2si, v2si); +long long __builtin_arm_wxor (long long, long long); +long long __builtin_arm_wzero (); @end smallexample @@ -15399,18 +15399,18 @@ As part of the Security Extensions GCC implements the intrinsics below. 
FPTR is used here to mean any function pointer type. @smallexample -cmse_address_info_t cmse_TT (void *) -cmse_address_info_t cmse_TT_fptr (FPTR) -cmse_address_info_t cmse_TTT (void *) -cmse_address_info_t cmse_TTT_fptr (FPTR) -cmse_address_info_t cmse_TTA (void *) -cmse_address_info_t cmse_TTA_fptr (FPTR) -cmse_address_info_t cmse_TTAT (void *) -cmse_address_info_t cmse_TTAT_fptr (FPTR) -void * cmse_check_address_range (void *, size_t, int) -typeof(p) cmse_nsfptr_create (FPTR p) -intptr_t cmse_is_nsfptr (FPTR) -int cmse_nonsecure_caller (void) +cmse_address_info_t cmse_TT (void *); +cmse_address_info_t cmse_TT_fptr (FPTR); +cmse_address_info_t cmse_TTT (void *); +cmse_address_info_t cmse_TTT_fptr (FPTR); +cmse_address_info_t cmse_TTA (void *); +cmse_address_info_t cmse_TTA_fptr (FPTR); +cmse_address_info_t cmse_TTAT (void *); +cmse_address_info_t cmse_TTAT_fptr (FPTR); +void * cmse_check_address_range (void *, size_t, int); +typeof(p) cmse_nsfptr_create (FPTR p); +intptr_t cmse_is_nsfptr (FPTR); +int cmse_nonsecure_caller (void); @end smallexample @node AVR Built-in Functions @@ -15531,8 +15531,8 @@ automatically add workarounds for hardware errata involving these instructions. These functions are named as follows: @smallexample -void __builtin_bfin_csync (void) -void __builtin_bfin_ssync (void) +void __builtin_bfin_csync (void); +void __builtin_bfin_ssync (void); @end smallexample @node BPF Built-in Functions @@ -16108,110 +16108,110 @@ instruction. Please refer to the architecture specification for details on what each instruction does. @smallexample -v2q15 __builtin_mips_addq_ph (v2q15, v2q15) -v2q15 __builtin_mips_addq_s_ph (v2q15, v2q15) -q31 __builtin_mips_addq_s_w (q31, q31) -v4i8 __builtin_mips_addu_qb (v4i8, v4i8) -v4i8 __builtin_mips_addu_s_qb (v4i8, v4i8) -v2q15 __builtin_mips_subq_ph (v2q15, v2q15) -v2q15 __builtin_mips_subq_s_ph (v2q15, v2q15) -q31 __builtin_mips_subq_s_w (q31, q31) -v4i8 __builtin_mips_subu_qb (v4i8, v4i8) -v4i8 __builtin_mips_subu_s_qb (v4i8, v4i8) -i32 __builtin_mips_addsc (i32, i32) -i32 __builtin_mips_addwc (i32, i32) -i32 __builtin_mips_modsub (i32, i32) -i32 __builtin_mips_raddu_w_qb (v4i8) -v2q15 __builtin_mips_absq_s_ph (v2q15) -q31 __builtin_mips_absq_s_w (q31) -v4i8 __builtin_mips_precrq_qb_ph (v2q15, v2q15) -v2q15 __builtin_mips_precrq_ph_w (q31, q31) -v2q15 __builtin_mips_precrq_rs_ph_w (q31, q31) -v4i8 __builtin_mips_precrqu_s_qb_ph (v2q15, v2q15) -q31 __builtin_mips_preceq_w_phl (v2q15) -q31 __builtin_mips_preceq_w_phr (v2q15) -v2q15 __builtin_mips_precequ_ph_qbl (v4i8) -v2q15 __builtin_mips_precequ_ph_qbr (v4i8) -v2q15 __builtin_mips_precequ_ph_qbla (v4i8) -v2q15 __builtin_mips_precequ_ph_qbra (v4i8) -v2q15 __builtin_mips_preceu_ph_qbl (v4i8) -v2q15 __builtin_mips_preceu_ph_qbr (v4i8) -v2q15 __builtin_mips_preceu_ph_qbla (v4i8) -v2q15 __builtin_mips_preceu_ph_qbra (v4i8) -v4i8 __builtin_mips_shll_qb (v4i8, imm0_7) -v4i8 __builtin_mips_shll_qb (v4i8, i32) -v2q15 __builtin_mips_shll_ph (v2q15, imm0_15) -v2q15 __builtin_mips_shll_ph (v2q15, i32) -v2q15 __builtin_mips_shll_s_ph (v2q15, imm0_15) -v2q15 __builtin_mips_shll_s_ph (v2q15, i32) -q31 __builtin_mips_shll_s_w (q31, imm0_31) -q31 __builtin_mips_shll_s_w (q31, i32) -v4i8 __builtin_mips_shrl_qb (v4i8, imm0_7) -v4i8 __builtin_mips_shrl_qb (v4i8, i32) -v2q15 __builtin_mips_shra_ph (v2q15, imm0_15) -v2q15 __builtin_mips_shra_ph (v2q15, i32) -v2q15 __builtin_mips_shra_r_ph (v2q15, imm0_15) -v2q15 __builtin_mips_shra_r_ph (v2q15, i32) -q31 __builtin_mips_shra_r_w (q31, imm0_31) -q31 
__builtin_mips_shra_r_w (q31, i32) -v2q15 __builtin_mips_muleu_s_ph_qbl (v4i8, v2q15) -v2q15 __builtin_mips_muleu_s_ph_qbr (v4i8, v2q15) -v2q15 __builtin_mips_mulq_rs_ph (v2q15, v2q15) -q31 __builtin_mips_muleq_s_w_phl (v2q15, v2q15) -q31 __builtin_mips_muleq_s_w_phr (v2q15, v2q15) -a64 __builtin_mips_dpau_h_qbl (a64, v4i8, v4i8) -a64 __builtin_mips_dpau_h_qbr (a64, v4i8, v4i8) -a64 __builtin_mips_dpsu_h_qbl (a64, v4i8, v4i8) -a64 __builtin_mips_dpsu_h_qbr (a64, v4i8, v4i8) -a64 __builtin_mips_dpaq_s_w_ph (a64, v2q15, v2q15) -a64 __builtin_mips_dpaq_sa_l_w (a64, q31, q31) -a64 __builtin_mips_dpsq_s_w_ph (a64, v2q15, v2q15) -a64 __builtin_mips_dpsq_sa_l_w (a64, q31, q31) -a64 __builtin_mips_mulsaq_s_w_ph (a64, v2q15, v2q15) -a64 __builtin_mips_maq_s_w_phl (a64, v2q15, v2q15) -a64 __builtin_mips_maq_s_w_phr (a64, v2q15, v2q15) -a64 __builtin_mips_maq_sa_w_phl (a64, v2q15, v2q15) -a64 __builtin_mips_maq_sa_w_phr (a64, v2q15, v2q15) -i32 __builtin_mips_bitrev (i32) -i32 __builtin_mips_insv (i32, i32) -v4i8 __builtin_mips_repl_qb (imm0_255) -v4i8 __builtin_mips_repl_qb (i32) -v2q15 __builtin_mips_repl_ph (imm_n512_511) -v2q15 __builtin_mips_repl_ph (i32) -void __builtin_mips_cmpu_eq_qb (v4i8, v4i8) -void __builtin_mips_cmpu_lt_qb (v4i8, v4i8) -void __builtin_mips_cmpu_le_qb (v4i8, v4i8) -i32 __builtin_mips_cmpgu_eq_qb (v4i8, v4i8) -i32 __builtin_mips_cmpgu_lt_qb (v4i8, v4i8) -i32 __builtin_mips_cmpgu_le_qb (v4i8, v4i8) -void __builtin_mips_cmp_eq_ph (v2q15, v2q15) -void __builtin_mips_cmp_lt_ph (v2q15, v2q15) -void __builtin_mips_cmp_le_ph (v2q15, v2q15) -v4i8 __builtin_mips_pick_qb (v4i8, v4i8) -v2q15 __builtin_mips_pick_ph (v2q15, v2q15) -v2q15 __builtin_mips_packrl_ph (v2q15, v2q15) -i32 __builtin_mips_extr_w (a64, imm0_31) -i32 __builtin_mips_extr_w (a64, i32) -i32 __builtin_mips_extr_r_w (a64, imm0_31) -i32 __builtin_mips_extr_s_h (a64, i32) -i32 __builtin_mips_extr_rs_w (a64, imm0_31) -i32 __builtin_mips_extr_rs_w (a64, i32) -i32 __builtin_mips_extr_s_h (a64, imm0_31) -i32 __builtin_mips_extr_r_w (a64, i32) -i32 __builtin_mips_extp (a64, imm0_31) -i32 __builtin_mips_extp (a64, i32) -i32 __builtin_mips_extpdp (a64, imm0_31) -i32 __builtin_mips_extpdp (a64, i32) -a64 __builtin_mips_shilo (a64, imm_n32_31) -a64 __builtin_mips_shilo (a64, i32) -a64 __builtin_mips_mthlip (a64, i32) -void __builtin_mips_wrdsp (i32, imm0_63) -i32 __builtin_mips_rddsp (imm0_63) -i32 __builtin_mips_lbux (void *, i32) -i32 __builtin_mips_lhx (void *, i32) -i32 __builtin_mips_lwx (void *, i32) -a64 __builtin_mips_ldx (void *, i32) [MIPS64 only] -i32 __builtin_mips_bposge32 (void) +v2q15 __builtin_mips_addq_ph (v2q15, v2q15); +v2q15 __builtin_mips_addq_s_ph (v2q15, v2q15); +q31 __builtin_mips_addq_s_w (q31, q31); +v4i8 __builtin_mips_addu_qb (v4i8, v4i8); +v4i8 __builtin_mips_addu_s_qb (v4i8, v4i8); +v2q15 __builtin_mips_subq_ph (v2q15, v2q15); +v2q15 __builtin_mips_subq_s_ph (v2q15, v2q15); +q31 __builtin_mips_subq_s_w (q31, q31); +v4i8 __builtin_mips_subu_qb (v4i8, v4i8); +v4i8 __builtin_mips_subu_s_qb (v4i8, v4i8); +i32 __builtin_mips_addsc (i32, i32); +i32 __builtin_mips_addwc (i32, i32); +i32 __builtin_mips_modsub (i32, i32); +i32 __builtin_mips_raddu_w_qb (v4i8); +v2q15 __builtin_mips_absq_s_ph (v2q15); +q31 __builtin_mips_absq_s_w (q31); +v4i8 __builtin_mips_precrq_qb_ph (v2q15, v2q15); +v2q15 __builtin_mips_precrq_ph_w (q31, q31); +v2q15 __builtin_mips_precrq_rs_ph_w (q31, q31); +v4i8 __builtin_mips_precrqu_s_qb_ph (v2q15, v2q15); +q31 __builtin_mips_preceq_w_phl (v2q15); +q31 __builtin_mips_preceq_w_phr 
(v2q15); +v2q15 __builtin_mips_precequ_ph_qbl (v4i8); +v2q15 __builtin_mips_precequ_ph_qbr (v4i8); +v2q15 __builtin_mips_precequ_ph_qbla (v4i8); +v2q15 __builtin_mips_precequ_ph_qbra (v4i8); +v2q15 __builtin_mips_preceu_ph_qbl (v4i8); +v2q15 __builtin_mips_preceu_ph_qbr (v4i8); +v2q15 __builtin_mips_preceu_ph_qbla (v4i8); +v2q15 __builtin_mips_preceu_ph_qbra (v4i8); +v4i8 __builtin_mips_shll_qb (v4i8, imm0_7); +v4i8 __builtin_mips_shll_qb (v4i8, i32); +v2q15 __builtin_mips_shll_ph (v2q15, imm0_15); +v2q15 __builtin_mips_shll_ph (v2q15, i32); +v2q15 __builtin_mips_shll_s_ph (v2q15, imm0_15); +v2q15 __builtin_mips_shll_s_ph (v2q15, i32); +q31 __builtin_mips_shll_s_w (q31, imm0_31); +q31 __builtin_mips_shll_s_w (q31, i32); +v4i8 __builtin_mips_shrl_qb (v4i8, imm0_7); +v4i8 __builtin_mips_shrl_qb (v4i8, i32); +v2q15 __builtin_mips_shra_ph (v2q15, imm0_15); +v2q15 __builtin_mips_shra_ph (v2q15, i32); +v2q15 __builtin_mips_shra_r_ph (v2q15, imm0_15); +v2q15 __builtin_mips_shra_r_ph (v2q15, i32); +q31 __builtin_mips_shra_r_w (q31, imm0_31); +q31 __builtin_mips_shra_r_w (q31, i32); +v2q15 __builtin_mips_muleu_s_ph_qbl (v4i8, v2q15); +v2q15 __builtin_mips_muleu_s_ph_qbr (v4i8, v2q15); +v2q15 __builtin_mips_mulq_rs_ph (v2q15, v2q15); +q31 __builtin_mips_muleq_s_w_phl (v2q15, v2q15); +q31 __builtin_mips_muleq_s_w_phr (v2q15, v2q15); +a64 __builtin_mips_dpau_h_qbl (a64, v4i8, v4i8); +a64 __builtin_mips_dpau_h_qbr (a64, v4i8, v4i8); +a64 __builtin_mips_dpsu_h_qbl (a64, v4i8, v4i8); +a64 __builtin_mips_dpsu_h_qbr (a64, v4i8, v4i8); +a64 __builtin_mips_dpaq_s_w_ph (a64, v2q15, v2q15); +a64 __builtin_mips_dpaq_sa_l_w (a64, q31, q31); +a64 __builtin_mips_dpsq_s_w_ph (a64, v2q15, v2q15); +a64 __builtin_mips_dpsq_sa_l_w (a64, q31, q31); +a64 __builtin_mips_mulsaq_s_w_ph (a64, v2q15, v2q15); +a64 __builtin_mips_maq_s_w_phl (a64, v2q15, v2q15); +a64 __builtin_mips_maq_s_w_phr (a64, v2q15, v2q15); +a64 __builtin_mips_maq_sa_w_phl (a64, v2q15, v2q15); +a64 __builtin_mips_maq_sa_w_phr (a64, v2q15, v2q15); +i32 __builtin_mips_bitrev (i32); +i32 __builtin_mips_insv (i32, i32); +v4i8 __builtin_mips_repl_qb (imm0_255); +v4i8 __builtin_mips_repl_qb (i32); +v2q15 __builtin_mips_repl_ph (imm_n512_511); +v2q15 __builtin_mips_repl_ph (i32); +void __builtin_mips_cmpu_eq_qb (v4i8, v4i8); +void __builtin_mips_cmpu_lt_qb (v4i8, v4i8); +void __builtin_mips_cmpu_le_qb (v4i8, v4i8); +i32 __builtin_mips_cmpgu_eq_qb (v4i8, v4i8); +i32 __builtin_mips_cmpgu_lt_qb (v4i8, v4i8); +i32 __builtin_mips_cmpgu_le_qb (v4i8, v4i8); +void __builtin_mips_cmp_eq_ph (v2q15, v2q15); +void __builtin_mips_cmp_lt_ph (v2q15, v2q15); +void __builtin_mips_cmp_le_ph (v2q15, v2q15); +v4i8 __builtin_mips_pick_qb (v4i8, v4i8); +v2q15 __builtin_mips_pick_ph (v2q15, v2q15); +v2q15 __builtin_mips_packrl_ph (v2q15, v2q15); +i32 __builtin_mips_extr_w (a64, imm0_31); +i32 __builtin_mips_extr_w (a64, i32); +i32 __builtin_mips_extr_r_w (a64, imm0_31); +i32 __builtin_mips_extr_s_h (a64, i32); +i32 __builtin_mips_extr_rs_w (a64, imm0_31); +i32 __builtin_mips_extr_rs_w (a64, i32); +i32 __builtin_mips_extr_s_h (a64, imm0_31); +i32 __builtin_mips_extr_r_w (a64, i32); +i32 __builtin_mips_extp (a64, imm0_31); +i32 __builtin_mips_extp (a64, i32); +i32 __builtin_mips_extpdp (a64, imm0_31); +i32 __builtin_mips_extpdp (a64, i32); +a64 __builtin_mips_shilo (a64, imm_n32_31); +a64 __builtin_mips_shilo (a64, i32); +a64 __builtin_mips_mthlip (a64, i32); +void __builtin_mips_wrdsp (i32, imm0_63); +i32 __builtin_mips_rddsp (imm0_63); +i32 __builtin_mips_lbux (void *, i32); +i32 
__builtin_mips_lhx (void *, i32); +i32 __builtin_mips_lwx (void *, i32); +a64 __builtin_mips_ldx (void *, i32); /* MIPS64 only */ +i32 __builtin_mips_bposge32 (void); a64 __builtin_mips_madd (a64, i32, i32); a64 __builtin_mips_maddu (a64, ui32, ui32); a64 __builtin_mips_msub (a64, i32, i32); @@ -19724,12 +19724,12 @@ must be a constant integer in the range of 0 to 15. The following sign extension builtins are provided: @smallexample -vector signed int vec_signexti (vector signed char a) -vector signed long long vec_signextll (vector signed char a) -vector signed int vec_signexti (vector signed short a) -vector signed long long vec_signextll (vector signed short a) -vector signed long long vec_signextll (vector signed int a) -vector signed long long vec_signextq (vector signed long long a) +vector signed int vec_signexti (vector signed char a); +vector signed long long vec_signextll (vector signed char a); +vector signed int vec_signexti (vector signed short a); +vector signed long long vec_signextll (vector signed short a); +vector signed long long vec_signextll (vector signed int a); +vector signed long long vec_signextq (vector signed long long a); @end smallexample Each element of the result is produced by sign-extending the element of the @@ -20555,28 +20555,28 @@ the @code{__builtin_tcheck} builtin, which does not take any input arguments. Refer to the ISA manual for a description of each instruction's operands. @smallexample -unsigned int __builtin_tbegin (unsigned int) -unsigned int __builtin_tend (unsigned int) +unsigned int __builtin_tbegin (unsigned int); +unsigned int __builtin_tend (unsigned int); -unsigned int __builtin_tabort (unsigned int) -unsigned int __builtin_tabortdc (unsigned int, unsigned int, unsigned int) -unsigned int __builtin_tabortdci (unsigned int, unsigned int, int) -unsigned int __builtin_tabortwc (unsigned int, unsigned int, unsigned int) -unsigned int __builtin_tabortwci (unsigned int, unsigned int, int) +unsigned int __builtin_tabort (unsigned int); +unsigned int __builtin_tabortdc (unsigned int, unsigned int, unsigned int); +unsigned int __builtin_tabortdci (unsigned int, unsigned int, int); +unsigned int __builtin_tabortwc (unsigned int, unsigned int, unsigned int); +unsigned int __builtin_tabortwci (unsigned int, unsigned int, int); -unsigned int __builtin_tcheck (void) -unsigned int __builtin_treclaim (unsigned int) -unsigned int __builtin_trechkpt (void) -unsigned int __builtin_tsr (unsigned int) +unsigned int __builtin_tcheck (void); +unsigned int __builtin_treclaim (unsigned int); +unsigned int __builtin_trechkpt (void); +unsigned int __builtin_tsr (unsigned int); @end smallexample In addition to the above HTM built-ins, we have added built-ins for some common extended mnemonics of the HTM instructions: @smallexample -unsigned int __builtin_tendall (void) -unsigned int __builtin_tresume (void) -unsigned int __builtin_tsuspend (void) +unsigned int __builtin_tendall (void); +unsigned int __builtin_tresume (void); +unsigned int __builtin_tsuspend (void); @end smallexample Note that the semantics of the above HTM builtins are required to mimic @@ -20605,10 +20605,10 @@ The following set of built-in functions are available to gain access to the HTM specific special purpose registers. 
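As a minimal usage sketch (assuming a POWER target compiled with @option{-mhtm}; the wrapper function, the @code{counter} variable and the fallback routine are purely illustrative), the transaction built-ins above and the register-access built-ins below are typically combined like this:

@smallexample
extern void fallback_locked_update (void);   /* illustrative fallback path */
static volatile long counter;

void
htm_update (void)
@{
  if (__builtin_tbegin (0))
    @{
      counter++;                 /* transactional body */
      __builtin_tend (0);
    @}
  else
    @{
      /* The transaction failed; TEXASR records the failure cause.  */
      unsigned long texasr = __builtin_get_texasr ();
      (void) texasr;
      fallback_locked_update ();
    @}
@}
@end smallexample

The register-access prototypes follow.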
@smallexample -unsigned long __builtin_get_texasr (void) -unsigned long __builtin_get_texasru (void) -unsigned long __builtin_get_tfhar (void) -unsigned long __builtin_get_tfiar (void) +unsigned long __builtin_get_texasr (void); +unsigned long __builtin_get_texasru (void); +unsigned long __builtin_get_tfhar (void); +unsigned long __builtin_get_tfiar (void); void __builtin_set_texasr (unsigned long); void __builtin_set_texasru (unsigned long); @@ -20689,24 +20689,24 @@ and S/390, allowing users to write one HTM source implementation that can be compiled and executed on either system. @smallexample -long __TM_simple_begin (void) -long __TM_begin (void* const TM_buff) -long __TM_end (void) -void __TM_abort (void) -void __TM_named_abort (unsigned char const code) -void __TM_resume (void) -void __TM_suspend (void) +long __TM_simple_begin (void); +long __TM_begin (void* const TM_buff); +long __TM_end (void); +void __TM_abort (void); +void __TM_named_abort (unsigned char const code); +void __TM_resume (void); +void __TM_suspend (void); -long __TM_is_user_abort (void* const TM_buff) -long __TM_is_named_user_abort (void* const TM_buff, unsigned char *code) -long __TM_is_illegal (void* const TM_buff) -long __TM_is_footprint_exceeded (void* const TM_buff) -long __TM_nesting_depth (void* const TM_buff) -long __TM_is_nested_too_deep(void* const TM_buff) -long __TM_is_conflict(void* const TM_buff) -long __TM_is_failure_persistent(void* const TM_buff) -long __TM_failure_address(void* const TM_buff) -long long __TM_failure_code(void* const TM_buff) +long __TM_is_user_abort (void* const TM_buff); +long __TM_is_named_user_abort (void* const TM_buff, unsigned char *code); +long __TM_is_illegal (void* const TM_buff); +long __TM_is_footprint_exceeded (void* const TM_buff); +long __TM_nesting_depth (void* const TM_buff); +long __TM_is_nested_too_deep(void* const TM_buff); +long __TM_is_conflict(void* const TM_buff); +long __TM_is_failure_persistent(void* const TM_buff); +long __TM_failure_address(void* const TM_buff); +long long __TM_failure_code(void* const TM_buff); @end smallexample Using these common set of HTM inline functions, we can create @@ -21443,34 +21443,32 @@ inclusion of the @code{c6x_intrinsics.h} header file. They map directly to C6X instructions. @smallexample +int _sadd (int, int); +int _ssub (int, int); +int _sadd2 (int, int); +int _ssub2 (int, int); +long long _mpy2 (int, int); +long long _smpy2 (int, int); +int _add4 (int, int); +int _sub4 (int, int); +int _saddu4 (int, int); -int _sadd (int, int) -int _ssub (int, int) -int _sadd2 (int, int) -int _ssub2 (int, int) -long long _mpy2 (int, int) -long long _smpy2 (int, int) -int _add4 (int, int) -int _sub4 (int, int) -int _saddu4 (int, int) +int _smpy (int, int); +int _smpyh (int, int); +int _smpyhl (int, int); +int _smpylh (int, int); -int _smpy (int, int) -int _smpyh (int, int) -int _smpyhl (int, int) -int _smpylh (int, int) +int _sshl (int, int); +int _subc (int, int); -int _sshl (int, int) -int _subc (int, int) - -int _avg2 (int, int) -int _avgu4 (int, int) - -int _clrr (int, int) -int _extr (int, int) -int _extru (int, int) -int _abs (int) -int _abs2 (int) +int _avg2 (int, int); +int _avgu4 (int, int); +int _clrr (int, int); +int _extr (int, int); +int _extru (int, int); +int _abs (int); +int _abs2 (int); @end smallexample @node TILE-Gx Built-in Functions @@ -21492,16 +21490,14 @@ GCC also provides intrinsics to directly access the network registers. 
The intrinsics are: @smallexample - -unsigned long long __tile_idn0_receive (void) -unsigned long long __tile_idn1_receive (void) -unsigned long long __tile_udn0_receive (void) -unsigned long long __tile_udn1_receive (void) -unsigned long long __tile_udn2_receive (void) -unsigned long long __tile_udn3_receive (void) -void __tile_idn_send (unsigned long long) -void __tile_udn_send (unsigned long long) - +unsigned long long __tile_idn0_receive (void); +unsigned long long __tile_idn1_receive (void); +unsigned long long __tile_udn0_receive (void); +unsigned long long __tile_udn1_receive (void); +unsigned long long __tile_udn2_receive (void); +unsigned long long __tile_udn3_receive (void); +void __tile_idn_send (unsigned long long); +void __tile_udn_send (unsigned long long); @end smallexample The intrinsic @code{void __tile_network_barrier (void)} is used to @@ -21528,18 +21524,16 @@ GCC also provides intrinsics to directly access the network registers. The intrinsics are: @smallexample - -unsigned __tile_idn0_receive (void) -unsigned __tile_idn1_receive (void) -unsigned __tile_sn_receive (void) -unsigned __tile_udn0_receive (void) -unsigned __tile_udn1_receive (void) -unsigned __tile_udn2_receive (void) -unsigned __tile_udn3_receive (void) -void __tile_idn_send (unsigned) -void __tile_sn_send (unsigned) -void __tile_udn_send (unsigned) - +unsigned __tile_idn0_receive (void); +unsigned __tile_idn1_receive (void); +unsigned __tile_sn_receive (void); +unsigned __tile_udn0_receive (void); +unsigned __tile_udn1_receive (void); +unsigned __tile_udn2_receive (void); +unsigned __tile_udn3_receive (void); +void __tile_idn_send (unsigned); +void __tile_sn_send (unsigned); +void __tile_udn_send (unsigned); @end smallexample The intrinsic @code{void __tile_network_barrier (void)} is used to @@ -21916,59 +21910,58 @@ The following built-in functions are made available by @option{-mmmx}. All of them generate the machine instruction that is part of the name. 
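As a minimal usage sketch (assuming an x86 target compiled with @option{-mmmx}; the wrapper function and the literal values are only illustrative), these built-ins are normally called through GCC's generic vector types:

@smallexample
typedef char v8qi __attribute__ ((vector_size (8)));

v8qi
mmx_add_bytes (void)
@{
  v8qi a = @{1, 2, 3, 4, 5, 6, 7, 8@};
  v8qi b = @{8, 7, 6, 5, 4, 3, 2, 1@};
  return __builtin_ia32_paddb (a, b);   /* PADDB: element-wise byte add */
@}
@end smallexample

The full list of prototypes follows.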
@smallexample -v8qi __builtin_ia32_paddb (v8qi, v8qi) -v4hi __builtin_ia32_paddw (v4hi, v4hi) -v2si __builtin_ia32_paddd (v2si, v2si) -v8qi __builtin_ia32_psubb (v8qi, v8qi) -v4hi __builtin_ia32_psubw (v4hi, v4hi) -v2si __builtin_ia32_psubd (v2si, v2si) -v8qi __builtin_ia32_paddsb (v8qi, v8qi) -v4hi __builtin_ia32_paddsw (v4hi, v4hi) -v8qi __builtin_ia32_psubsb (v8qi, v8qi) -v4hi __builtin_ia32_psubsw (v4hi, v4hi) -v8qi __builtin_ia32_paddusb (v8qi, v8qi) -v4hi __builtin_ia32_paddusw (v4hi, v4hi) -v8qi __builtin_ia32_psubusb (v8qi, v8qi) -v4hi __builtin_ia32_psubusw (v4hi, v4hi) -v4hi __builtin_ia32_pmullw (v4hi, v4hi) -v4hi __builtin_ia32_pmulhw (v4hi, v4hi) -di __builtin_ia32_pand (di, di) -di __builtin_ia32_pandn (di,di) -di __builtin_ia32_por (di, di) -di __builtin_ia32_pxor (di, di) -v8qi __builtin_ia32_pcmpeqb (v8qi, v8qi) -v4hi __builtin_ia32_pcmpeqw (v4hi, v4hi) -v2si __builtin_ia32_pcmpeqd (v2si, v2si) -v8qi __builtin_ia32_pcmpgtb (v8qi, v8qi) -v4hi __builtin_ia32_pcmpgtw (v4hi, v4hi) -v2si __builtin_ia32_pcmpgtd (v2si, v2si) -v8qi __builtin_ia32_punpckhbw (v8qi, v8qi) -v4hi __builtin_ia32_punpckhwd (v4hi, v4hi) -v2si __builtin_ia32_punpckhdq (v2si, v2si) -v8qi __builtin_ia32_punpcklbw (v8qi, v8qi) -v4hi __builtin_ia32_punpcklwd (v4hi, v4hi) -v2si __builtin_ia32_punpckldq (v2si, v2si) -v8qi __builtin_ia32_packsswb (v4hi, v4hi) -v4hi __builtin_ia32_packssdw (v2si, v2si) -v8qi __builtin_ia32_packuswb (v4hi, v4hi) - -v4hi __builtin_ia32_psllw (v4hi, v4hi) -v2si __builtin_ia32_pslld (v2si, v2si) -v1di __builtin_ia32_psllq (v1di, v1di) -v4hi __builtin_ia32_psrlw (v4hi, v4hi) -v2si __builtin_ia32_psrld (v2si, v2si) -v1di __builtin_ia32_psrlq (v1di, v1di) -v4hi __builtin_ia32_psraw (v4hi, v4hi) -v2si __builtin_ia32_psrad (v2si, v2si) -v4hi __builtin_ia32_psllwi (v4hi, int) -v2si __builtin_ia32_pslldi (v2si, int) -v1di __builtin_ia32_psllqi (v1di, int) -v4hi __builtin_ia32_psrlwi (v4hi, int) -v2si __builtin_ia32_psrldi (v2si, int) -v1di __builtin_ia32_psrlqi (v1di, int) -v4hi __builtin_ia32_psrawi (v4hi, int) -v2si __builtin_ia32_psradi (v2si, int) - +v8qi __builtin_ia32_paddb (v8qi, v8qi); +v4hi __builtin_ia32_paddw (v4hi, v4hi); +v2si __builtin_ia32_paddd (v2si, v2si); +v8qi __builtin_ia32_psubb (v8qi, v8qi); +v4hi __builtin_ia32_psubw (v4hi, v4hi); +v2si __builtin_ia32_psubd (v2si, v2si); +v8qi __builtin_ia32_paddsb (v8qi, v8qi); +v4hi __builtin_ia32_paddsw (v4hi, v4hi); +v8qi __builtin_ia32_psubsb (v8qi, v8qi); +v4hi __builtin_ia32_psubsw (v4hi, v4hi); +v8qi __builtin_ia32_paddusb (v8qi, v8qi); +v4hi __builtin_ia32_paddusw (v4hi, v4hi); +v8qi __builtin_ia32_psubusb (v8qi, v8qi); +v4hi __builtin_ia32_psubusw (v4hi, v4hi); +v4hi __builtin_ia32_pmullw (v4hi, v4hi); +v4hi __builtin_ia32_pmulhw (v4hi, v4hi); +di __builtin_ia32_pand (di, di); +di __builtin_ia32_pandn (di,di); +di __builtin_ia32_por (di, di); +di __builtin_ia32_pxor (di, di); +v8qi __builtin_ia32_pcmpeqb (v8qi, v8qi); +v4hi __builtin_ia32_pcmpeqw (v4hi, v4hi); +v2si __builtin_ia32_pcmpeqd (v2si, v2si); +v8qi __builtin_ia32_pcmpgtb (v8qi, v8qi); +v4hi __builtin_ia32_pcmpgtw (v4hi, v4hi); +v2si __builtin_ia32_pcmpgtd (v2si, v2si); +v8qi __builtin_ia32_punpckhbw (v8qi, v8qi); +v4hi __builtin_ia32_punpckhwd (v4hi, v4hi); +v2si __builtin_ia32_punpckhdq (v2si, v2si); +v8qi __builtin_ia32_punpcklbw (v8qi, v8qi); +v4hi __builtin_ia32_punpcklwd (v4hi, v4hi); +v2si __builtin_ia32_punpckldq (v2si, v2si); +v8qi __builtin_ia32_packsswb (v4hi, v4hi); +v4hi __builtin_ia32_packssdw (v2si, v2si); +v8qi __builtin_ia32_packuswb (v4hi, v4hi); + 
+v4hi __builtin_ia32_psllw (v4hi, v4hi); +v2si __builtin_ia32_pslld (v2si, v2si); +v1di __builtin_ia32_psllq (v1di, v1di); +v4hi __builtin_ia32_psrlw (v4hi, v4hi); +v2si __builtin_ia32_psrld (v2si, v2si); +v1di __builtin_ia32_psrlq (v1di, v1di); +v4hi __builtin_ia32_psraw (v4hi, v4hi); +v2si __builtin_ia32_psrad (v2si, v2si); +v4hi __builtin_ia32_psllwi (v4hi, int); +v2si __builtin_ia32_pslldi (v2si, int); +v1di __builtin_ia32_psllqi (v1di, int); +v4hi __builtin_ia32_psrlwi (v4hi, int); +v2si __builtin_ia32_psrldi (v2si, int); +v1di __builtin_ia32_psrlqi (v1di, int); +v4hi __builtin_ia32_psrawi (v4hi, int); +v2si __builtin_ia32_psradi (v2si, int); @end smallexample The following built-in functions are made available either with @@ -21976,92 +21969,92 @@ The following built-in functions are made available either with the machine instruction that is part of the name. @smallexample -v4hi __builtin_ia32_pmulhuw (v4hi, v4hi) -v8qi __builtin_ia32_pavgb (v8qi, v8qi) -v4hi __builtin_ia32_pavgw (v4hi, v4hi) -v1di __builtin_ia32_psadbw (v8qi, v8qi) -v8qi __builtin_ia32_pmaxub (v8qi, v8qi) -v4hi __builtin_ia32_pmaxsw (v4hi, v4hi) -v8qi __builtin_ia32_pminub (v8qi, v8qi) -v4hi __builtin_ia32_pminsw (v4hi, v4hi) -int __builtin_ia32_pmovmskb (v8qi) -void __builtin_ia32_maskmovq (v8qi, v8qi, char *) -void __builtin_ia32_movntq (di *, di) -void __builtin_ia32_sfence (void) +v4hi __builtin_ia32_pmulhuw (v4hi, v4hi); +v8qi __builtin_ia32_pavgb (v8qi, v8qi); +v4hi __builtin_ia32_pavgw (v4hi, v4hi); +v1di __builtin_ia32_psadbw (v8qi, v8qi); +v8qi __builtin_ia32_pmaxub (v8qi, v8qi); +v4hi __builtin_ia32_pmaxsw (v4hi, v4hi); +v8qi __builtin_ia32_pminub (v8qi, v8qi); +v4hi __builtin_ia32_pminsw (v4hi, v4hi); +int __builtin_ia32_pmovmskb (v8qi); +void __builtin_ia32_maskmovq (v8qi, v8qi, char *); +void __builtin_ia32_movntq (di *, di); +void __builtin_ia32_sfence (void); @end smallexample The following built-in functions are available when @option{-msse} is used. All of them generate the machine instruction that is part of the name. 
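As a minimal usage sketch (assuming @option{-msse} on an x86 target; the wrapper function and the literal values are only illustrative):

@smallexample
typedef float v4sf __attribute__ ((vector_size (16)));

int
sse_example (void)
@{
  v4sf a = @{1.0f, 2.0f, 3.0f, 4.0f@};
  v4sf b = @{4.0f, 3.0f, 2.0f, 1.0f@};
  v4sf sum = __builtin_ia32_addps (a, b);   /* ADDPS: element-wise add */
  v4sf mx  = __builtin_ia32_maxps (a, b);   /* MAXPS: element-wise maximum */
  (void) mx;
  return __builtin_ia32_movmskps (sum);     /* MOVMSKPS: sign-bit mask */
@}
@end smallexample

The prototypes are: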
@smallexample -int __builtin_ia32_comieq (v4sf, v4sf) -int __builtin_ia32_comineq (v4sf, v4sf) -int __builtin_ia32_comilt (v4sf, v4sf) -int __builtin_ia32_comile (v4sf, v4sf) -int __builtin_ia32_comigt (v4sf, v4sf) -int __builtin_ia32_comige (v4sf, v4sf) -int __builtin_ia32_ucomieq (v4sf, v4sf) -int __builtin_ia32_ucomineq (v4sf, v4sf) -int __builtin_ia32_ucomilt (v4sf, v4sf) -int __builtin_ia32_ucomile (v4sf, v4sf) -int __builtin_ia32_ucomigt (v4sf, v4sf) -int __builtin_ia32_ucomige (v4sf, v4sf) -v4sf __builtin_ia32_addps (v4sf, v4sf) -v4sf __builtin_ia32_subps (v4sf, v4sf) -v4sf __builtin_ia32_mulps (v4sf, v4sf) -v4sf __builtin_ia32_divps (v4sf, v4sf) -v4sf __builtin_ia32_addss (v4sf, v4sf) -v4sf __builtin_ia32_subss (v4sf, v4sf) -v4sf __builtin_ia32_mulss (v4sf, v4sf) -v4sf __builtin_ia32_divss (v4sf, v4sf) -v4sf __builtin_ia32_cmpeqps (v4sf, v4sf) -v4sf __builtin_ia32_cmpltps (v4sf, v4sf) -v4sf __builtin_ia32_cmpleps (v4sf, v4sf) -v4sf __builtin_ia32_cmpgtps (v4sf, v4sf) -v4sf __builtin_ia32_cmpgeps (v4sf, v4sf) -v4sf __builtin_ia32_cmpunordps (v4sf, v4sf) -v4sf __builtin_ia32_cmpneqps (v4sf, v4sf) -v4sf __builtin_ia32_cmpnltps (v4sf, v4sf) -v4sf __builtin_ia32_cmpnleps (v4sf, v4sf) -v4sf __builtin_ia32_cmpngtps (v4sf, v4sf) -v4sf __builtin_ia32_cmpngeps (v4sf, v4sf) -v4sf __builtin_ia32_cmpordps (v4sf, v4sf) -v4sf __builtin_ia32_cmpeqss (v4sf, v4sf) -v4sf __builtin_ia32_cmpltss (v4sf, v4sf) -v4sf __builtin_ia32_cmpless (v4sf, v4sf) -v4sf __builtin_ia32_cmpunordss (v4sf, v4sf) -v4sf __builtin_ia32_cmpneqss (v4sf, v4sf) -v4sf __builtin_ia32_cmpnltss (v4sf, v4sf) -v4sf __builtin_ia32_cmpnless (v4sf, v4sf) -v4sf __builtin_ia32_cmpordss (v4sf, v4sf) -v4sf __builtin_ia32_maxps (v4sf, v4sf) -v4sf __builtin_ia32_maxss (v4sf, v4sf) -v4sf __builtin_ia32_minps (v4sf, v4sf) -v4sf __builtin_ia32_minss (v4sf, v4sf) -v4sf __builtin_ia32_andps (v4sf, v4sf) -v4sf __builtin_ia32_andnps (v4sf, v4sf) -v4sf __builtin_ia32_orps (v4sf, v4sf) -v4sf __builtin_ia32_xorps (v4sf, v4sf) -v4sf __builtin_ia32_movss (v4sf, v4sf) -v4sf __builtin_ia32_movhlps (v4sf, v4sf) -v4sf __builtin_ia32_movlhps (v4sf, v4sf) -v4sf __builtin_ia32_unpckhps (v4sf, v4sf) -v4sf __builtin_ia32_unpcklps (v4sf, v4sf) -v4sf __builtin_ia32_cvtpi2ps (v4sf, v2si) -v4sf __builtin_ia32_cvtsi2ss (v4sf, int) -v2si __builtin_ia32_cvtps2pi (v4sf) -int __builtin_ia32_cvtss2si (v4sf) -v2si __builtin_ia32_cvttps2pi (v4sf) -int __builtin_ia32_cvttss2si (v4sf) -v4sf __builtin_ia32_rcpps (v4sf) -v4sf __builtin_ia32_rsqrtps (v4sf) -v4sf __builtin_ia32_sqrtps (v4sf) -v4sf __builtin_ia32_rcpss (v4sf) -v4sf __builtin_ia32_rsqrtss (v4sf) -v4sf __builtin_ia32_sqrtss (v4sf) -v4sf __builtin_ia32_shufps (v4sf, v4sf, int) -void __builtin_ia32_movntps (float *, v4sf) -int __builtin_ia32_movmskps (v4sf) +int __builtin_ia32_comieq (v4sf, v4sf); +int __builtin_ia32_comineq (v4sf, v4sf); +int __builtin_ia32_comilt (v4sf, v4sf); +int __builtin_ia32_comile (v4sf, v4sf); +int __builtin_ia32_comigt (v4sf, v4sf); +int __builtin_ia32_comige (v4sf, v4sf); +int __builtin_ia32_ucomieq (v4sf, v4sf); +int __builtin_ia32_ucomineq (v4sf, v4sf); +int __builtin_ia32_ucomilt (v4sf, v4sf); +int __builtin_ia32_ucomile (v4sf, v4sf); +int __builtin_ia32_ucomigt (v4sf, v4sf); +int __builtin_ia32_ucomige (v4sf, v4sf); +v4sf __builtin_ia32_addps (v4sf, v4sf); +v4sf __builtin_ia32_subps (v4sf, v4sf); +v4sf __builtin_ia32_mulps (v4sf, v4sf); +v4sf __builtin_ia32_divps (v4sf, v4sf); +v4sf __builtin_ia32_addss (v4sf, v4sf); +v4sf __builtin_ia32_subss (v4sf, v4sf); +v4sf __builtin_ia32_mulss 
(v4sf, v4sf); +v4sf __builtin_ia32_divss (v4sf, v4sf); +v4sf __builtin_ia32_cmpeqps (v4sf, v4sf); +v4sf __builtin_ia32_cmpltps (v4sf, v4sf); +v4sf __builtin_ia32_cmpleps (v4sf, v4sf); +v4sf __builtin_ia32_cmpgtps (v4sf, v4sf); +v4sf __builtin_ia32_cmpgeps (v4sf, v4sf); +v4sf __builtin_ia32_cmpunordps (v4sf, v4sf); +v4sf __builtin_ia32_cmpneqps (v4sf, v4sf); +v4sf __builtin_ia32_cmpnltps (v4sf, v4sf); +v4sf __builtin_ia32_cmpnleps (v4sf, v4sf); +v4sf __builtin_ia32_cmpngtps (v4sf, v4sf); +v4sf __builtin_ia32_cmpngeps (v4sf, v4sf); +v4sf __builtin_ia32_cmpordps (v4sf, v4sf); +v4sf __builtin_ia32_cmpeqss (v4sf, v4sf); +v4sf __builtin_ia32_cmpltss (v4sf, v4sf); +v4sf __builtin_ia32_cmpless (v4sf, v4sf); +v4sf __builtin_ia32_cmpunordss (v4sf, v4sf); +v4sf __builtin_ia32_cmpneqss (v4sf, v4sf); +v4sf __builtin_ia32_cmpnltss (v4sf, v4sf); +v4sf __builtin_ia32_cmpnless (v4sf, v4sf); +v4sf __builtin_ia32_cmpordss (v4sf, v4sf); +v4sf __builtin_ia32_maxps (v4sf, v4sf); +v4sf __builtin_ia32_maxss (v4sf, v4sf); +v4sf __builtin_ia32_minps (v4sf, v4sf); +v4sf __builtin_ia32_minss (v4sf, v4sf); +v4sf __builtin_ia32_andps (v4sf, v4sf); +v4sf __builtin_ia32_andnps (v4sf, v4sf); +v4sf __builtin_ia32_orps (v4sf, v4sf); +v4sf __builtin_ia32_xorps (v4sf, v4sf); +v4sf __builtin_ia32_movss (v4sf, v4sf); +v4sf __builtin_ia32_movhlps (v4sf, v4sf); +v4sf __builtin_ia32_movlhps (v4sf, v4sf); +v4sf __builtin_ia32_unpckhps (v4sf, v4sf); +v4sf __builtin_ia32_unpcklps (v4sf, v4sf); +v4sf __builtin_ia32_cvtpi2ps (v4sf, v2si); +v4sf __builtin_ia32_cvtsi2ss (v4sf, int); +v2si __builtin_ia32_cvtps2pi (v4sf); +int __builtin_ia32_cvtss2si (v4sf); +v2si __builtin_ia32_cvttps2pi (v4sf); +int __builtin_ia32_cvttss2si (v4sf); +v4sf __builtin_ia32_rcpps (v4sf); +v4sf __builtin_ia32_rsqrtps (v4sf); +v4sf __builtin_ia32_sqrtps (v4sf); +v4sf __builtin_ia32_rcpss (v4sf); +v4sf __builtin_ia32_rsqrtss (v4sf); +v4sf __builtin_ia32_sqrtss (v4sf); +v4sf __builtin_ia32_shufps (v4sf, v4sf, int); +void __builtin_ia32_movntps (float *, v4sf); +int __builtin_ia32_movmskps (v4sf); @end smallexample The following built-in functions are available when @option{-msse} is used. @@ -22087,222 +22080,222 @@ The following built-in functions are available when @option{-msse2} is used. All of them generate the machine instruction that is part of the name. 
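As a minimal usage sketch (assuming @option{-msse2}; the wrapper function and the literal values are only illustrative):

@smallexample
typedef double v2df __attribute__ ((vector_size (16)));

int
sse2_example (void)
@{
  v2df x = @{1.5, -2.5@};
  v2df y = @{0.5,  4.0@};
  v2df prod = __builtin_ia32_mulpd (x, y);   /* MULPD: element-wise multiply */
  v2df lo   = __builtin_ia32_minpd (x, y);   /* MINPD: element-wise minimum */
  (void) lo;
  return __builtin_ia32_movmskpd (prod);     /* MOVMSKPD: sign-bit mask */
@}
@end smallexample

The complete list of prototypes follows.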
@smallexample -int __builtin_ia32_comisdeq (v2df, v2df) -int __builtin_ia32_comisdlt (v2df, v2df) -int __builtin_ia32_comisdle (v2df, v2df) -int __builtin_ia32_comisdgt (v2df, v2df) -int __builtin_ia32_comisdge (v2df, v2df) -int __builtin_ia32_comisdneq (v2df, v2df) -int __builtin_ia32_ucomisdeq (v2df, v2df) -int __builtin_ia32_ucomisdlt (v2df, v2df) -int __builtin_ia32_ucomisdle (v2df, v2df) -int __builtin_ia32_ucomisdgt (v2df, v2df) -int __builtin_ia32_ucomisdge (v2df, v2df) -int __builtin_ia32_ucomisdneq (v2df, v2df) -v2df __builtin_ia32_cmpeqpd (v2df, v2df) -v2df __builtin_ia32_cmpltpd (v2df, v2df) -v2df __builtin_ia32_cmplepd (v2df, v2df) -v2df __builtin_ia32_cmpgtpd (v2df, v2df) -v2df __builtin_ia32_cmpgepd (v2df, v2df) -v2df __builtin_ia32_cmpunordpd (v2df, v2df) -v2df __builtin_ia32_cmpneqpd (v2df, v2df) -v2df __builtin_ia32_cmpnltpd (v2df, v2df) -v2df __builtin_ia32_cmpnlepd (v2df, v2df) -v2df __builtin_ia32_cmpngtpd (v2df, v2df) -v2df __builtin_ia32_cmpngepd (v2df, v2df) -v2df __builtin_ia32_cmpordpd (v2df, v2df) -v2df __builtin_ia32_cmpeqsd (v2df, v2df) -v2df __builtin_ia32_cmpltsd (v2df, v2df) -v2df __builtin_ia32_cmplesd (v2df, v2df) -v2df __builtin_ia32_cmpunordsd (v2df, v2df) -v2df __builtin_ia32_cmpneqsd (v2df, v2df) -v2df __builtin_ia32_cmpnltsd (v2df, v2df) -v2df __builtin_ia32_cmpnlesd (v2df, v2df) -v2df __builtin_ia32_cmpordsd (v2df, v2df) -v2di __builtin_ia32_paddq (v2di, v2di) -v2di __builtin_ia32_psubq (v2di, v2di) -v2df __builtin_ia32_addpd (v2df, v2df) -v2df __builtin_ia32_subpd (v2df, v2df) -v2df __builtin_ia32_mulpd (v2df, v2df) -v2df __builtin_ia32_divpd (v2df, v2df) -v2df __builtin_ia32_addsd (v2df, v2df) -v2df __builtin_ia32_subsd (v2df, v2df) -v2df __builtin_ia32_mulsd (v2df, v2df) -v2df __builtin_ia32_divsd (v2df, v2df) -v2df __builtin_ia32_minpd (v2df, v2df) -v2df __builtin_ia32_maxpd (v2df, v2df) -v2df __builtin_ia32_minsd (v2df, v2df) -v2df __builtin_ia32_maxsd (v2df, v2df) -v2df __builtin_ia32_andpd (v2df, v2df) -v2df __builtin_ia32_andnpd (v2df, v2df) -v2df __builtin_ia32_orpd (v2df, v2df) -v2df __builtin_ia32_xorpd (v2df, v2df) -v2df __builtin_ia32_movsd (v2df, v2df) -v2df __builtin_ia32_unpckhpd (v2df, v2df) -v2df __builtin_ia32_unpcklpd (v2df, v2df) -v16qi __builtin_ia32_paddb128 (v16qi, v16qi) -v8hi __builtin_ia32_paddw128 (v8hi, v8hi) -v4si __builtin_ia32_paddd128 (v4si, v4si) -v2di __builtin_ia32_paddq128 (v2di, v2di) -v16qi __builtin_ia32_psubb128 (v16qi, v16qi) -v8hi __builtin_ia32_psubw128 (v8hi, v8hi) -v4si __builtin_ia32_psubd128 (v4si, v4si) -v2di __builtin_ia32_psubq128 (v2di, v2di) -v8hi __builtin_ia32_pmullw128 (v8hi, v8hi) -v8hi __builtin_ia32_pmulhw128 (v8hi, v8hi) -v2di __builtin_ia32_pand128 (v2di, v2di) -v2di __builtin_ia32_pandn128 (v2di, v2di) -v2di __builtin_ia32_por128 (v2di, v2di) -v2di __builtin_ia32_pxor128 (v2di, v2di) -v16qi __builtin_ia32_pavgb128 (v16qi, v16qi) -v8hi __builtin_ia32_pavgw128 (v8hi, v8hi) -v16qi __builtin_ia32_pcmpeqb128 (v16qi, v16qi) -v8hi __builtin_ia32_pcmpeqw128 (v8hi, v8hi) -v4si __builtin_ia32_pcmpeqd128 (v4si, v4si) -v16qi __builtin_ia32_pcmpgtb128 (v16qi, v16qi) -v8hi __builtin_ia32_pcmpgtw128 (v8hi, v8hi) -v4si __builtin_ia32_pcmpgtd128 (v4si, v4si) -v16qi __builtin_ia32_pmaxub128 (v16qi, v16qi) -v8hi __builtin_ia32_pmaxsw128 (v8hi, v8hi) -v16qi __builtin_ia32_pminub128 (v16qi, v16qi) -v8hi __builtin_ia32_pminsw128 (v8hi, v8hi) -v16qi __builtin_ia32_punpckhbw128 (v16qi, v16qi) -v8hi __builtin_ia32_punpckhwd128 (v8hi, v8hi) -v4si __builtin_ia32_punpckhdq128 (v4si, v4si) -v2di 
__builtin_ia32_punpckhqdq128 (v2di, v2di) -v16qi __builtin_ia32_punpcklbw128 (v16qi, v16qi) -v8hi __builtin_ia32_punpcklwd128 (v8hi, v8hi) -v4si __builtin_ia32_punpckldq128 (v4si, v4si) -v2di __builtin_ia32_punpcklqdq128 (v2di, v2di) -v16qi __builtin_ia32_packsswb128 (v8hi, v8hi) -v8hi __builtin_ia32_packssdw128 (v4si, v4si) -v16qi __builtin_ia32_packuswb128 (v8hi, v8hi) -v8hi __builtin_ia32_pmulhuw128 (v8hi, v8hi) -void __builtin_ia32_maskmovdqu (v16qi, v16qi) -v2df __builtin_ia32_loadupd (double *) -void __builtin_ia32_storeupd (double *, v2df) -v2df __builtin_ia32_loadhpd (v2df, double const *) -v2df __builtin_ia32_loadlpd (v2df, double const *) -int __builtin_ia32_movmskpd (v2df) -int __builtin_ia32_pmovmskb128 (v16qi) -void __builtin_ia32_movnti (int *, int) -void __builtin_ia32_movnti64 (long long int *, long long int) -void __builtin_ia32_movntpd (double *, v2df) -void __builtin_ia32_movntdq (v2df *, v2df) -v4si __builtin_ia32_pshufd (v4si, int) -v8hi __builtin_ia32_pshuflw (v8hi, int) -v8hi __builtin_ia32_pshufhw (v8hi, int) -v2di __builtin_ia32_psadbw128 (v16qi, v16qi) -v2df __builtin_ia32_sqrtpd (v2df) -v2df __builtin_ia32_sqrtsd (v2df) -v2df __builtin_ia32_shufpd (v2df, v2df, int) -v2df __builtin_ia32_cvtdq2pd (v4si) -v4sf __builtin_ia32_cvtdq2ps (v4si) -v4si __builtin_ia32_cvtpd2dq (v2df) -v2si __builtin_ia32_cvtpd2pi (v2df) -v4sf __builtin_ia32_cvtpd2ps (v2df) -v4si __builtin_ia32_cvttpd2dq (v2df) -v2si __builtin_ia32_cvttpd2pi (v2df) -v2df __builtin_ia32_cvtpi2pd (v2si) -int __builtin_ia32_cvtsd2si (v2df) -int __builtin_ia32_cvttsd2si (v2df) -long long __builtin_ia32_cvtsd2si64 (v2df) -long long __builtin_ia32_cvttsd2si64 (v2df) -v4si __builtin_ia32_cvtps2dq (v4sf) -v2df __builtin_ia32_cvtps2pd (v4sf) -v4si __builtin_ia32_cvttps2dq (v4sf) -v2df __builtin_ia32_cvtsi2sd (v2df, int) -v2df __builtin_ia32_cvtsi642sd (v2df, long long) -v4sf __builtin_ia32_cvtsd2ss (v4sf, v2df) -v2df __builtin_ia32_cvtss2sd (v2df, v4sf) -void __builtin_ia32_clflush (const void *) -void __builtin_ia32_lfence (void) -void __builtin_ia32_mfence (void) -v16qi __builtin_ia32_loaddqu (const char *) -void __builtin_ia32_storedqu (char *, v16qi) -v1di __builtin_ia32_pmuludq (v2si, v2si) -v2di __builtin_ia32_pmuludq128 (v4si, v4si) -v8hi __builtin_ia32_psllw128 (v8hi, v8hi) -v4si __builtin_ia32_pslld128 (v4si, v4si) -v2di __builtin_ia32_psllq128 (v2di, v2di) -v8hi __builtin_ia32_psrlw128 (v8hi, v8hi) -v4si __builtin_ia32_psrld128 (v4si, v4si) -v2di __builtin_ia32_psrlq128 (v2di, v2di) -v8hi __builtin_ia32_psraw128 (v8hi, v8hi) -v4si __builtin_ia32_psrad128 (v4si, v4si) -v2di __builtin_ia32_pslldqi128 (v2di, int) -v8hi __builtin_ia32_psllwi128 (v8hi, int) -v4si __builtin_ia32_pslldi128 (v4si, int) -v2di __builtin_ia32_psllqi128 (v2di, int) -v2di __builtin_ia32_psrldqi128 (v2di, int) -v8hi __builtin_ia32_psrlwi128 (v8hi, int) -v4si __builtin_ia32_psrldi128 (v4si, int) -v2di __builtin_ia32_psrlqi128 (v2di, int) -v8hi __builtin_ia32_psrawi128 (v8hi, int) -v4si __builtin_ia32_psradi128 (v4si, int) -v4si __builtin_ia32_pmaddwd128 (v8hi, v8hi) -v2di __builtin_ia32_movq128 (v2di) +int __builtin_ia32_comisdeq (v2df, v2df); +int __builtin_ia32_comisdlt (v2df, v2df); +int __builtin_ia32_comisdle (v2df, v2df); +int __builtin_ia32_comisdgt (v2df, v2df); +int __builtin_ia32_comisdge (v2df, v2df); +int __builtin_ia32_comisdneq (v2df, v2df); +int __builtin_ia32_ucomisdeq (v2df, v2df); +int __builtin_ia32_ucomisdlt (v2df, v2df); +int __builtin_ia32_ucomisdle (v2df, v2df); +int __builtin_ia32_ucomisdgt (v2df, v2df); +int 
__builtin_ia32_ucomisdge (v2df, v2df); +int __builtin_ia32_ucomisdneq (v2df, v2df); +v2df __builtin_ia32_cmpeqpd (v2df, v2df); +v2df __builtin_ia32_cmpltpd (v2df, v2df); +v2df __builtin_ia32_cmplepd (v2df, v2df); +v2df __builtin_ia32_cmpgtpd (v2df, v2df); +v2df __builtin_ia32_cmpgepd (v2df, v2df); +v2df __builtin_ia32_cmpunordpd (v2df, v2df); +v2df __builtin_ia32_cmpneqpd (v2df, v2df); +v2df __builtin_ia32_cmpnltpd (v2df, v2df); +v2df __builtin_ia32_cmpnlepd (v2df, v2df); +v2df __builtin_ia32_cmpngtpd (v2df, v2df); +v2df __builtin_ia32_cmpngepd (v2df, v2df); +v2df __builtin_ia32_cmpordpd (v2df, v2df); +v2df __builtin_ia32_cmpeqsd (v2df, v2df); +v2df __builtin_ia32_cmpltsd (v2df, v2df); +v2df __builtin_ia32_cmplesd (v2df, v2df); +v2df __builtin_ia32_cmpunordsd (v2df, v2df); +v2df __builtin_ia32_cmpneqsd (v2df, v2df); +v2df __builtin_ia32_cmpnltsd (v2df, v2df); +v2df __builtin_ia32_cmpnlesd (v2df, v2df); +v2df __builtin_ia32_cmpordsd (v2df, v2df); +v2di __builtin_ia32_paddq (v2di, v2di); +v2di __builtin_ia32_psubq (v2di, v2di); +v2df __builtin_ia32_addpd (v2df, v2df); +v2df __builtin_ia32_subpd (v2df, v2df); +v2df __builtin_ia32_mulpd (v2df, v2df); +v2df __builtin_ia32_divpd (v2df, v2df); +v2df __builtin_ia32_addsd (v2df, v2df); +v2df __builtin_ia32_subsd (v2df, v2df); +v2df __builtin_ia32_mulsd (v2df, v2df); +v2df __builtin_ia32_divsd (v2df, v2df); +v2df __builtin_ia32_minpd (v2df, v2df); +v2df __builtin_ia32_maxpd (v2df, v2df); +v2df __builtin_ia32_minsd (v2df, v2df); +v2df __builtin_ia32_maxsd (v2df, v2df); +v2df __builtin_ia32_andpd (v2df, v2df); +v2df __builtin_ia32_andnpd (v2df, v2df); +v2df __builtin_ia32_orpd (v2df, v2df); +v2df __builtin_ia32_xorpd (v2df, v2df); +v2df __builtin_ia32_movsd (v2df, v2df); +v2df __builtin_ia32_unpckhpd (v2df, v2df); +v2df __builtin_ia32_unpcklpd (v2df, v2df); +v16qi __builtin_ia32_paddb128 (v16qi, v16qi); +v8hi __builtin_ia32_paddw128 (v8hi, v8hi); +v4si __builtin_ia32_paddd128 (v4si, v4si); +v2di __builtin_ia32_paddq128 (v2di, v2di); +v16qi __builtin_ia32_psubb128 (v16qi, v16qi); +v8hi __builtin_ia32_psubw128 (v8hi, v8hi); +v4si __builtin_ia32_psubd128 (v4si, v4si); +v2di __builtin_ia32_psubq128 (v2di, v2di); +v8hi __builtin_ia32_pmullw128 (v8hi, v8hi); +v8hi __builtin_ia32_pmulhw128 (v8hi, v8hi); +v2di __builtin_ia32_pand128 (v2di, v2di); +v2di __builtin_ia32_pandn128 (v2di, v2di); +v2di __builtin_ia32_por128 (v2di, v2di); +v2di __builtin_ia32_pxor128 (v2di, v2di); +v16qi __builtin_ia32_pavgb128 (v16qi, v16qi); +v8hi __builtin_ia32_pavgw128 (v8hi, v8hi); +v16qi __builtin_ia32_pcmpeqb128 (v16qi, v16qi); +v8hi __builtin_ia32_pcmpeqw128 (v8hi, v8hi); +v4si __builtin_ia32_pcmpeqd128 (v4si, v4si); +v16qi __builtin_ia32_pcmpgtb128 (v16qi, v16qi); +v8hi __builtin_ia32_pcmpgtw128 (v8hi, v8hi); +v4si __builtin_ia32_pcmpgtd128 (v4si, v4si); +v16qi __builtin_ia32_pmaxub128 (v16qi, v16qi); +v8hi __builtin_ia32_pmaxsw128 (v8hi, v8hi); +v16qi __builtin_ia32_pminub128 (v16qi, v16qi); +v8hi __builtin_ia32_pminsw128 (v8hi, v8hi); +v16qi __builtin_ia32_punpckhbw128 (v16qi, v16qi); +v8hi __builtin_ia32_punpckhwd128 (v8hi, v8hi); +v4si __builtin_ia32_punpckhdq128 (v4si, v4si); +v2di __builtin_ia32_punpckhqdq128 (v2di, v2di); +v16qi __builtin_ia32_punpcklbw128 (v16qi, v16qi); +v8hi __builtin_ia32_punpcklwd128 (v8hi, v8hi); +v4si __builtin_ia32_punpckldq128 (v4si, v4si); +v2di __builtin_ia32_punpcklqdq128 (v2di, v2di); +v16qi __builtin_ia32_packsswb128 (v8hi, v8hi); +v8hi __builtin_ia32_packssdw128 (v4si, v4si); +v16qi __builtin_ia32_packuswb128 (v8hi, v8hi); +v8hi 
__builtin_ia32_pmulhuw128 (v8hi, v8hi); +void __builtin_ia32_maskmovdqu (v16qi, v16qi); +v2df __builtin_ia32_loadupd (double *); +void __builtin_ia32_storeupd (double *, v2df); +v2df __builtin_ia32_loadhpd (v2df, double const *); +v2df __builtin_ia32_loadlpd (v2df, double const *); +int __builtin_ia32_movmskpd (v2df); +int __builtin_ia32_pmovmskb128 (v16qi); +void __builtin_ia32_movnti (int *, int); +void __builtin_ia32_movnti64 (long long int *, long long int); +void __builtin_ia32_movntpd (double *, v2df); +void __builtin_ia32_movntdq (v2df *, v2df); +v4si __builtin_ia32_pshufd (v4si, int); +v8hi __builtin_ia32_pshuflw (v8hi, int); +v8hi __builtin_ia32_pshufhw (v8hi, int); +v2di __builtin_ia32_psadbw128 (v16qi, v16qi); +v2df __builtin_ia32_sqrtpd (v2df); +v2df __builtin_ia32_sqrtsd (v2df); +v2df __builtin_ia32_shufpd (v2df, v2df, int); +v2df __builtin_ia32_cvtdq2pd (v4si); +v4sf __builtin_ia32_cvtdq2ps (v4si); +v4si __builtin_ia32_cvtpd2dq (v2df); +v2si __builtin_ia32_cvtpd2pi (v2df); +v4sf __builtin_ia32_cvtpd2ps (v2df); +v4si __builtin_ia32_cvttpd2dq (v2df); +v2si __builtin_ia32_cvttpd2pi (v2df); +v2df __builtin_ia32_cvtpi2pd (v2si); +int __builtin_ia32_cvtsd2si (v2df); +int __builtin_ia32_cvttsd2si (v2df); +long long __builtin_ia32_cvtsd2si64 (v2df); +long long __builtin_ia32_cvttsd2si64 (v2df); +v4si __builtin_ia32_cvtps2dq (v4sf); +v2df __builtin_ia32_cvtps2pd (v4sf); +v4si __builtin_ia32_cvttps2dq (v4sf); +v2df __builtin_ia32_cvtsi2sd (v2df, int); +v2df __builtin_ia32_cvtsi642sd (v2df, long long); +v4sf __builtin_ia32_cvtsd2ss (v4sf, v2df); +v2df __builtin_ia32_cvtss2sd (v2df, v4sf); +void __builtin_ia32_clflush (const void *); +void __builtin_ia32_lfence (void); +void __builtin_ia32_mfence (void); +v16qi __builtin_ia32_loaddqu (const char *); +void __builtin_ia32_storedqu (char *, v16qi); +v1di __builtin_ia32_pmuludq (v2si, v2si); +v2di __builtin_ia32_pmuludq128 (v4si, v4si); +v8hi __builtin_ia32_psllw128 (v8hi, v8hi); +v4si __builtin_ia32_pslld128 (v4si, v4si); +v2di __builtin_ia32_psllq128 (v2di, v2di); +v8hi __builtin_ia32_psrlw128 (v8hi, v8hi); +v4si __builtin_ia32_psrld128 (v4si, v4si); +v2di __builtin_ia32_psrlq128 (v2di, v2di); +v8hi __builtin_ia32_psraw128 (v8hi, v8hi); +v4si __builtin_ia32_psrad128 (v4si, v4si); +v2di __builtin_ia32_pslldqi128 (v2di, int); +v8hi __builtin_ia32_psllwi128 (v8hi, int); +v4si __builtin_ia32_pslldi128 (v4si, int); +v2di __builtin_ia32_psllqi128 (v2di, int); +v2di __builtin_ia32_psrldqi128 (v2di, int); +v8hi __builtin_ia32_psrlwi128 (v8hi, int); +v4si __builtin_ia32_psrldi128 (v4si, int); +v2di __builtin_ia32_psrlqi128 (v2di, int); +v8hi __builtin_ia32_psrawi128 (v8hi, int); +v4si __builtin_ia32_psradi128 (v4si, int); +v4si __builtin_ia32_pmaddwd128 (v8hi, v8hi); +v2di __builtin_ia32_movq128 (v2di); @end smallexample The following built-in functions are available when @option{-msse3} is used. All of them generate the machine instruction that is part of the name. 
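As a minimal usage sketch (assuming @option{-msse3}; the wrapper function and the literal values are only illustrative):

@smallexample
typedef float v4sf __attribute__ ((vector_size (16)));

v4sf
sse3_example (void)
@{
  v4sf a = @{1.0f, 2.0f, 3.0f, 4.0f@};
  v4sf b = @{5.0f, 6.0f, 7.0f, 8.0f@};
  /* HADDPS: horizontal add of adjacent pairs, here @{3, 7, 11, 15@}.  */
  v4sf h = __builtin_ia32_haddps (a, b);
  /* MOVSHDUP: duplicate the odd-indexed elements.  */
  return __builtin_ia32_movshdup (h);
@}
@end smallexample

The prototypes are: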
@smallexample -v2df __builtin_ia32_addsubpd (v2df, v2df) -v4sf __builtin_ia32_addsubps (v4sf, v4sf) -v2df __builtin_ia32_haddpd (v2df, v2df) -v4sf __builtin_ia32_haddps (v4sf, v4sf) -v2df __builtin_ia32_hsubpd (v2df, v2df) -v4sf __builtin_ia32_hsubps (v4sf, v4sf) -v16qi __builtin_ia32_lddqu (char const *) -void __builtin_ia32_monitor (void *, unsigned int, unsigned int) -v4sf __builtin_ia32_movshdup (v4sf) -v4sf __builtin_ia32_movsldup (v4sf) -void __builtin_ia32_mwait (unsigned int, unsigned int) +v2df __builtin_ia32_addsubpd (v2df, v2df); +v4sf __builtin_ia32_addsubps (v4sf, v4sf); +v2df __builtin_ia32_haddpd (v2df, v2df); +v4sf __builtin_ia32_haddps (v4sf, v4sf); +v2df __builtin_ia32_hsubpd (v2df, v2df); +v4sf __builtin_ia32_hsubps (v4sf, v4sf); +v16qi __builtin_ia32_lddqu (char const *); +void __builtin_ia32_monitor (void *, unsigned int, unsigned int); +v4sf __builtin_ia32_movshdup (v4sf); +v4sf __builtin_ia32_movsldup (v4sf); +void __builtin_ia32_mwait (unsigned int, unsigned int); @end smallexample The following built-in functions are available when @option{-mssse3} is used. All of them generate the machine instruction that is part of the name. @smallexample -v2si __builtin_ia32_phaddd (v2si, v2si) -v4hi __builtin_ia32_phaddw (v4hi, v4hi) -v4hi __builtin_ia32_phaddsw (v4hi, v4hi) -v2si __builtin_ia32_phsubd (v2si, v2si) -v4hi __builtin_ia32_phsubw (v4hi, v4hi) -v4hi __builtin_ia32_phsubsw (v4hi, v4hi) -v4hi __builtin_ia32_pmaddubsw (v8qi, v8qi) -v4hi __builtin_ia32_pmulhrsw (v4hi, v4hi) -v8qi __builtin_ia32_pshufb (v8qi, v8qi) -v8qi __builtin_ia32_psignb (v8qi, v8qi) -v2si __builtin_ia32_psignd (v2si, v2si) -v4hi __builtin_ia32_psignw (v4hi, v4hi) -v1di __builtin_ia32_palignr (v1di, v1di, int) -v8qi __builtin_ia32_pabsb (v8qi) -v2si __builtin_ia32_pabsd (v2si) -v4hi __builtin_ia32_pabsw (v4hi) +v2si __builtin_ia32_phaddd (v2si, v2si); +v4hi __builtin_ia32_phaddw (v4hi, v4hi); +v4hi __builtin_ia32_phaddsw (v4hi, v4hi); +v2si __builtin_ia32_phsubd (v2si, v2si); +v4hi __builtin_ia32_phsubw (v4hi, v4hi); +v4hi __builtin_ia32_phsubsw (v4hi, v4hi); +v4hi __builtin_ia32_pmaddubsw (v8qi, v8qi); +v4hi __builtin_ia32_pmulhrsw (v4hi, v4hi); +v8qi __builtin_ia32_pshufb (v8qi, v8qi); +v8qi __builtin_ia32_psignb (v8qi, v8qi); +v2si __builtin_ia32_psignd (v2si, v2si); +v4hi __builtin_ia32_psignw (v4hi, v4hi); +v1di __builtin_ia32_palignr (v1di, v1di, int); +v8qi __builtin_ia32_pabsb (v8qi); +v2si __builtin_ia32_pabsd (v2si); +v4hi __builtin_ia32_pabsw (v4hi); @end smallexample The following built-in functions are available when @option{-mssse3} is used. All of them generate the machine instruction that is part of the name. 
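As a minimal usage sketch of the 128-bit variants (assuming @option{-mssse3}; the wrapper function and the literal values are only illustrative):

@smallexample
typedef int v4si __attribute__ ((vector_size (16)));

v4si
ssse3_example (void)
@{
  v4si v = @{-5, 7, -9, 11@};
  v4si a = __builtin_ia32_pabsd128 (v);       /* PABSD: @{5, 7, 9, 11@} */
  return __builtin_ia32_phaddd128 (a, a);     /* PHADDD: horizontal pair sums */
@}
@end smallexample

The prototypes are: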
@smallexample -v4si __builtin_ia32_phaddd128 (v4si, v4si) -v8hi __builtin_ia32_phaddw128 (v8hi, v8hi) -v8hi __builtin_ia32_phaddsw128 (v8hi, v8hi) -v4si __builtin_ia32_phsubd128 (v4si, v4si) -v8hi __builtin_ia32_phsubw128 (v8hi, v8hi) -v8hi __builtin_ia32_phsubsw128 (v8hi, v8hi) -v8hi __builtin_ia32_pmaddubsw128 (v16qi, v16qi) -v8hi __builtin_ia32_pmulhrsw128 (v8hi, v8hi) -v16qi __builtin_ia32_pshufb128 (v16qi, v16qi) -v16qi __builtin_ia32_psignb128 (v16qi, v16qi) -v4si __builtin_ia32_psignd128 (v4si, v4si) -v8hi __builtin_ia32_psignw128 (v8hi, v8hi) -v2di __builtin_ia32_palignr128 (v2di, v2di, int) -v16qi __builtin_ia32_pabsb128 (v16qi) -v4si __builtin_ia32_pabsd128 (v4si) -v8hi __builtin_ia32_pabsw128 (v8hi) +v4si __builtin_ia32_phaddd128 (v4si, v4si); +v8hi __builtin_ia32_phaddw128 (v8hi, v8hi); +v8hi __builtin_ia32_phaddsw128 (v8hi, v8hi); +v4si __builtin_ia32_phsubd128 (v4si, v4si); +v8hi __builtin_ia32_phsubw128 (v8hi, v8hi); +v8hi __builtin_ia32_phsubsw128 (v8hi, v8hi); +v8hi __builtin_ia32_pmaddubsw128 (v16qi, v16qi); +v8hi __builtin_ia32_pmulhrsw128 (v8hi, v8hi); +v16qi __builtin_ia32_pshufb128 (v16qi, v16qi); +v16qi __builtin_ia32_psignb128 (v16qi, v16qi); +v4si __builtin_ia32_psignd128 (v4si, v4si); +v8hi __builtin_ia32_psignw128 (v8hi, v8hi); +v2di __builtin_ia32_palignr128 (v2di, v2di, int); +v16qi __builtin_ia32_pabsb128 (v16qi); +v4si __builtin_ia32_pabsd128 (v4si); +v8hi __builtin_ia32_pabsw128 (v8hi); @end smallexample The following built-in functions are available when @option{-msse4.1} is @@ -22310,49 +22303,49 @@ used. All of them generate the machine instruction that is part of the name. @smallexample -v2df __builtin_ia32_blendpd (v2df, v2df, const int) -v4sf __builtin_ia32_blendps (v4sf, v4sf, const int) -v2df __builtin_ia32_blendvpd (v2df, v2df, v2df) -v4sf __builtin_ia32_blendvps (v4sf, v4sf, v4sf) -v2df __builtin_ia32_dppd (v2df, v2df, const int) -v4sf __builtin_ia32_dpps (v4sf, v4sf, const int) -v4sf __builtin_ia32_insertps128 (v4sf, v4sf, const int) +v2df __builtin_ia32_blendpd (v2df, v2df, const int); +v4sf __builtin_ia32_blendps (v4sf, v4sf, const int); +v2df __builtin_ia32_blendvpd (v2df, v2df, v2df); +v4sf __builtin_ia32_blendvps (v4sf, v4sf, v4sf); +v2df __builtin_ia32_dppd (v2df, v2df, const int); +v4sf __builtin_ia32_dpps (v4sf, v4sf, const int); +v4sf __builtin_ia32_insertps128 (v4sf, v4sf, const int); v2di __builtin_ia32_movntdqa (v2di *); -v16qi __builtin_ia32_mpsadbw128 (v16qi, v16qi, const int) -v8hi __builtin_ia32_packusdw128 (v4si, v4si) -v16qi __builtin_ia32_pblendvb128 (v16qi, v16qi, v16qi) -v8hi __builtin_ia32_pblendw128 (v8hi, v8hi, const int) -v2di __builtin_ia32_pcmpeqq (v2di, v2di) -v8hi __builtin_ia32_phminposuw128 (v8hi) -v16qi __builtin_ia32_pmaxsb128 (v16qi, v16qi) -v4si __builtin_ia32_pmaxsd128 (v4si, v4si) -v4si __builtin_ia32_pmaxud128 (v4si, v4si) -v8hi __builtin_ia32_pmaxuw128 (v8hi, v8hi) -v16qi __builtin_ia32_pminsb128 (v16qi, v16qi) -v4si __builtin_ia32_pminsd128 (v4si, v4si) -v4si __builtin_ia32_pminud128 (v4si, v4si) -v8hi __builtin_ia32_pminuw128 (v8hi, v8hi) -v4si __builtin_ia32_pmovsxbd128 (v16qi) -v2di __builtin_ia32_pmovsxbq128 (v16qi) -v8hi __builtin_ia32_pmovsxbw128 (v16qi) -v2di __builtin_ia32_pmovsxdq128 (v4si) -v4si __builtin_ia32_pmovsxwd128 (v8hi) -v2di __builtin_ia32_pmovsxwq128 (v8hi) -v4si __builtin_ia32_pmovzxbd128 (v16qi) -v2di __builtin_ia32_pmovzxbq128 (v16qi) -v8hi __builtin_ia32_pmovzxbw128 (v16qi) -v2di __builtin_ia32_pmovzxdq128 (v4si) -v4si __builtin_ia32_pmovzxwd128 (v8hi) -v2di 
__builtin_ia32_pmovzxwq128 (v8hi) -v2di __builtin_ia32_pmuldq128 (v4si, v4si) -v4si __builtin_ia32_pmulld128 (v4si, v4si) -int __builtin_ia32_ptestc128 (v2di, v2di) -int __builtin_ia32_ptestnzc128 (v2di, v2di) -int __builtin_ia32_ptestz128 (v2di, v2di) -v2df __builtin_ia32_roundpd (v2df, const int) -v4sf __builtin_ia32_roundps (v4sf, const int) -v2df __builtin_ia32_roundsd (v2df, v2df, const int) -v4sf __builtin_ia32_roundss (v4sf, v4sf, const int) +v16qi __builtin_ia32_mpsadbw128 (v16qi, v16qi, const int); +v8hi __builtin_ia32_packusdw128 (v4si, v4si); +v16qi __builtin_ia32_pblendvb128 (v16qi, v16qi, v16qi); +v8hi __builtin_ia32_pblendw128 (v8hi, v8hi, const int); +v2di __builtin_ia32_pcmpeqq (v2di, v2di); +v8hi __builtin_ia32_phminposuw128 (v8hi); +v16qi __builtin_ia32_pmaxsb128 (v16qi, v16qi); +v4si __builtin_ia32_pmaxsd128 (v4si, v4si); +v4si __builtin_ia32_pmaxud128 (v4si, v4si); +v8hi __builtin_ia32_pmaxuw128 (v8hi, v8hi); +v16qi __builtin_ia32_pminsb128 (v16qi, v16qi); +v4si __builtin_ia32_pminsd128 (v4si, v4si); +v4si __builtin_ia32_pminud128 (v4si, v4si); +v8hi __builtin_ia32_pminuw128 (v8hi, v8hi); +v4si __builtin_ia32_pmovsxbd128 (v16qi); +v2di __builtin_ia32_pmovsxbq128 (v16qi); +v8hi __builtin_ia32_pmovsxbw128 (v16qi); +v2di __builtin_ia32_pmovsxdq128 (v4si); +v4si __builtin_ia32_pmovsxwd128 (v8hi); +v2di __builtin_ia32_pmovsxwq128 (v8hi); +v4si __builtin_ia32_pmovzxbd128 (v16qi); +v2di __builtin_ia32_pmovzxbq128 (v16qi); +v8hi __builtin_ia32_pmovzxbw128 (v16qi); +v2di __builtin_ia32_pmovzxdq128 (v4si); +v4si __builtin_ia32_pmovzxwd128 (v8hi); +v2di __builtin_ia32_pmovzxwq128 (v8hi); +v2di __builtin_ia32_pmuldq128 (v4si, v4si); +v4si __builtin_ia32_pmulld128 (v4si, v4si); +int __builtin_ia32_ptestc128 (v2di, v2di); +int __builtin_ia32_ptestnzc128 (v2di, v2di); +int __builtin_ia32_ptestz128 (v2di, v2di); +v2df __builtin_ia32_roundpd (v2df, const int); +v4sf __builtin_ia32_roundps (v4sf, const int); +v2df __builtin_ia32_roundsd (v2df, v2df, const int); +v4sf __builtin_ia32_roundss (v4sf, v4sf, const int); @end smallexample The following built-in functions are available when @option{-msse4.1} is @@ -22388,21 +22381,21 @@ used. All of them generate the machine instruction that is part of the name. 
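As a minimal usage sketch (assuming @option{-msse4.2}; the wrapper function and the literal values are only illustrative), the 64-bit comparison built-in at the end of the list below can be used like this:

@smallexample
typedef long long v2di __attribute__ ((vector_size (16)));

v2di
sse42_example (void)
@{
  v2di a = @{10LL, -3LL@};
  v2di b = @{ 2LL,  5LL@};
  /* PCMPGTQ: all-ones in each 64-bit lane where a > b, zero otherwise.  */
  return __builtin_ia32_pcmpgtq (a, b);
@}
@end smallexample

The prototypes are: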
@smallexample -v16qi __builtin_ia32_pcmpestrm128 (v16qi, int, v16qi, int, const int) -int __builtin_ia32_pcmpestri128 (v16qi, int, v16qi, int, const int) -int __builtin_ia32_pcmpestria128 (v16qi, int, v16qi, int, const int) -int __builtin_ia32_pcmpestric128 (v16qi, int, v16qi, int, const int) -int __builtin_ia32_pcmpestrio128 (v16qi, int, v16qi, int, const int) -int __builtin_ia32_pcmpestris128 (v16qi, int, v16qi, int, const int) -int __builtin_ia32_pcmpestriz128 (v16qi, int, v16qi, int, const int) -v16qi __builtin_ia32_pcmpistrm128 (v16qi, v16qi, const int) -int __builtin_ia32_pcmpistri128 (v16qi, v16qi, const int) -int __builtin_ia32_pcmpistria128 (v16qi, v16qi, const int) -int __builtin_ia32_pcmpistric128 (v16qi, v16qi, const int) -int __builtin_ia32_pcmpistrio128 (v16qi, v16qi, const int) -int __builtin_ia32_pcmpistris128 (v16qi, v16qi, const int) -int __builtin_ia32_pcmpistriz128 (v16qi, v16qi, const int) -v2di __builtin_ia32_pcmpgtq (v2di, v2di) +v16qi __builtin_ia32_pcmpestrm128 (v16qi, int, v16qi, int, const int); +int __builtin_ia32_pcmpestri128 (v16qi, int, v16qi, int, const int); +int __builtin_ia32_pcmpestria128 (v16qi, int, v16qi, int, const int); +int __builtin_ia32_pcmpestric128 (v16qi, int, v16qi, int, const int); +int __builtin_ia32_pcmpestrio128 (v16qi, int, v16qi, int, const int); +int __builtin_ia32_pcmpestris128 (v16qi, int, v16qi, int, const int); +int __builtin_ia32_pcmpestriz128 (v16qi, int, v16qi, int, const int); +v16qi __builtin_ia32_pcmpistrm128 (v16qi, v16qi, const int); +int __builtin_ia32_pcmpistri128 (v16qi, v16qi, const int); +int __builtin_ia32_pcmpistria128 (v16qi, v16qi, const int); +int __builtin_ia32_pcmpistric128 (v16qi, v16qi, const int); +int __builtin_ia32_pcmpistrio128 (v16qi, v16qi, const int); +int __builtin_ia32_pcmpistris128 (v16qi, v16qi, const int); +int __builtin_ia32_pcmpistriz128 (v16qi, v16qi, const int); +v2di __builtin_ia32_pcmpgtq (v2di, v2di); @end smallexample The following built-in functions are available when @option{-msse4.2} is @@ -22437,134 +22430,134 @@ used. All of them generate the machine instruction that is part of the name. 
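As a minimal usage sketch of the 256-bit built-ins (assuming @option{-mavx}; the wrapper function and the literal values are only illustrative):

@smallexample
typedef double v4df __attribute__ ((vector_size (32)));

v4df
avx_example (void)
@{
  v4df a = @{1.0, 2.0, 3.0, 4.0@};
  v4df b = @{4.0, 3.0, 2.0, 1.0@};
  v4df sum = __builtin_ia32_addpd256 (a, b);   /* VADDPD: element-wise add */
  return __builtin_ia32_maxpd256 (sum, a);     /* VMAXPD: element-wise maximum */
@}
@end smallexample

The prototypes are: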
@smallexample -v4df __builtin_ia32_addpd256 (v4df,v4df) -v8sf __builtin_ia32_addps256 (v8sf,v8sf) -v4df __builtin_ia32_addsubpd256 (v4df,v4df) -v8sf __builtin_ia32_addsubps256 (v8sf,v8sf) -v4df __builtin_ia32_andnpd256 (v4df,v4df) -v8sf __builtin_ia32_andnps256 (v8sf,v8sf) -v4df __builtin_ia32_andpd256 (v4df,v4df) -v8sf __builtin_ia32_andps256 (v8sf,v8sf) -v4df __builtin_ia32_blendpd256 (v4df,v4df,int) -v8sf __builtin_ia32_blendps256 (v8sf,v8sf,int) -v4df __builtin_ia32_blendvpd256 (v4df,v4df,v4df) -v8sf __builtin_ia32_blendvps256 (v8sf,v8sf,v8sf) -v2df __builtin_ia32_cmppd (v2df,v2df,int) -v4df __builtin_ia32_cmppd256 (v4df,v4df,int) -v4sf __builtin_ia32_cmpps (v4sf,v4sf,int) -v8sf __builtin_ia32_cmpps256 (v8sf,v8sf,int) -v2df __builtin_ia32_cmpsd (v2df,v2df,int) -v4sf __builtin_ia32_cmpss (v4sf,v4sf,int) -v4df __builtin_ia32_cvtdq2pd256 (v4si) -v8sf __builtin_ia32_cvtdq2ps256 (v8si) -v4si __builtin_ia32_cvtpd2dq256 (v4df) -v4sf __builtin_ia32_cvtpd2ps256 (v4df) -v8si __builtin_ia32_cvtps2dq256 (v8sf) -v4df __builtin_ia32_cvtps2pd256 (v4sf) -v4si __builtin_ia32_cvttpd2dq256 (v4df) -v8si __builtin_ia32_cvttps2dq256 (v8sf) -v4df __builtin_ia32_divpd256 (v4df,v4df) -v8sf __builtin_ia32_divps256 (v8sf,v8sf) -v8sf __builtin_ia32_dpps256 (v8sf,v8sf,int) -v4df __builtin_ia32_haddpd256 (v4df,v4df) -v8sf __builtin_ia32_haddps256 (v8sf,v8sf) -v4df __builtin_ia32_hsubpd256 (v4df,v4df) -v8sf __builtin_ia32_hsubps256 (v8sf,v8sf) -v32qi __builtin_ia32_lddqu256 (pcchar) -v32qi __builtin_ia32_loaddqu256 (pcchar) -v4df __builtin_ia32_loadupd256 (pcdouble) -v8sf __builtin_ia32_loadups256 (pcfloat) -v2df __builtin_ia32_maskloadpd (pcv2df,v2df) -v4df __builtin_ia32_maskloadpd256 (pcv4df,v4df) -v4sf __builtin_ia32_maskloadps (pcv4sf,v4sf) -v8sf __builtin_ia32_maskloadps256 (pcv8sf,v8sf) -void __builtin_ia32_maskstorepd (pv2df,v2df,v2df) -void __builtin_ia32_maskstorepd256 (pv4df,v4df,v4df) -void __builtin_ia32_maskstoreps (pv4sf,v4sf,v4sf) -void __builtin_ia32_maskstoreps256 (pv8sf,v8sf,v8sf) -v4df __builtin_ia32_maxpd256 (v4df,v4df) -v8sf __builtin_ia32_maxps256 (v8sf,v8sf) -v4df __builtin_ia32_minpd256 (v4df,v4df) -v8sf __builtin_ia32_minps256 (v8sf,v8sf) -v4df __builtin_ia32_movddup256 (v4df) -int __builtin_ia32_movmskpd256 (v4df) -int __builtin_ia32_movmskps256 (v8sf) -v8sf __builtin_ia32_movshdup256 (v8sf) -v8sf __builtin_ia32_movsldup256 (v8sf) -v4df __builtin_ia32_mulpd256 (v4df,v4df) -v8sf __builtin_ia32_mulps256 (v8sf,v8sf) -v4df __builtin_ia32_orpd256 (v4df,v4df) -v8sf __builtin_ia32_orps256 (v8sf,v8sf) -v2df __builtin_ia32_pd_pd256 (v4df) -v4df __builtin_ia32_pd256_pd (v2df) -v4sf __builtin_ia32_ps_ps256 (v8sf) -v8sf __builtin_ia32_ps256_ps (v4sf) -int __builtin_ia32_ptestc256 (v4di,v4di,ptest) -int __builtin_ia32_ptestnzc256 (v4di,v4di,ptest) -int __builtin_ia32_ptestz256 (v4di,v4di,ptest) -v8sf __builtin_ia32_rcpps256 (v8sf) -v4df __builtin_ia32_roundpd256 (v4df,int) -v8sf __builtin_ia32_roundps256 (v8sf,int) -v8sf __builtin_ia32_rsqrtps_nr256 (v8sf) -v8sf __builtin_ia32_rsqrtps256 (v8sf) -v4df __builtin_ia32_shufpd256 (v4df,v4df,int) -v8sf __builtin_ia32_shufps256 (v8sf,v8sf,int) -v4si __builtin_ia32_si_si256 (v8si) -v8si __builtin_ia32_si256_si (v4si) -v4df __builtin_ia32_sqrtpd256 (v4df) -v8sf __builtin_ia32_sqrtps_nr256 (v8sf) -v8sf __builtin_ia32_sqrtps256 (v8sf) -void __builtin_ia32_storedqu256 (pchar,v32qi) -void __builtin_ia32_storeupd256 (pdouble,v4df) -void __builtin_ia32_storeups256 (pfloat,v8sf) -v4df __builtin_ia32_subpd256 (v4df,v4df) -v8sf __builtin_ia32_subps256 (v8sf,v8sf) -v4df 
__builtin_ia32_unpckhpd256 (v4df,v4df) -v8sf __builtin_ia32_unpckhps256 (v8sf,v8sf) -v4df __builtin_ia32_unpcklpd256 (v4df,v4df) -v8sf __builtin_ia32_unpcklps256 (v8sf,v8sf) -v4df __builtin_ia32_vbroadcastf128_pd256 (pcv2df) -v8sf __builtin_ia32_vbroadcastf128_ps256 (pcv4sf) -v4df __builtin_ia32_vbroadcastsd256 (pcdouble) -v4sf __builtin_ia32_vbroadcastss (pcfloat) -v8sf __builtin_ia32_vbroadcastss256 (pcfloat) -v2df __builtin_ia32_vextractf128_pd256 (v4df,int) -v4sf __builtin_ia32_vextractf128_ps256 (v8sf,int) -v4si __builtin_ia32_vextractf128_si256 (v8si,int) -v4df __builtin_ia32_vinsertf128_pd256 (v4df,v2df,int) -v8sf __builtin_ia32_vinsertf128_ps256 (v8sf,v4sf,int) -v8si __builtin_ia32_vinsertf128_si256 (v8si,v4si,int) -v4df __builtin_ia32_vperm2f128_pd256 (v4df,v4df,int) -v8sf __builtin_ia32_vperm2f128_ps256 (v8sf,v8sf,int) -v8si __builtin_ia32_vperm2f128_si256 (v8si,v8si,int) -v2df __builtin_ia32_vpermil2pd (v2df,v2df,v2di,int) -v4df __builtin_ia32_vpermil2pd256 (v4df,v4df,v4di,int) -v4sf __builtin_ia32_vpermil2ps (v4sf,v4sf,v4si,int) -v8sf __builtin_ia32_vpermil2ps256 (v8sf,v8sf,v8si,int) -v2df __builtin_ia32_vpermilpd (v2df,int) -v4df __builtin_ia32_vpermilpd256 (v4df,int) -v4sf __builtin_ia32_vpermilps (v4sf,int) -v8sf __builtin_ia32_vpermilps256 (v8sf,int) -v2df __builtin_ia32_vpermilvarpd (v2df,v2di) -v4df __builtin_ia32_vpermilvarpd256 (v4df,v4di) -v4sf __builtin_ia32_vpermilvarps (v4sf,v4si) -v8sf __builtin_ia32_vpermilvarps256 (v8sf,v8si) -int __builtin_ia32_vtestcpd (v2df,v2df,ptest) -int __builtin_ia32_vtestcpd256 (v4df,v4df,ptest) -int __builtin_ia32_vtestcps (v4sf,v4sf,ptest) -int __builtin_ia32_vtestcps256 (v8sf,v8sf,ptest) -int __builtin_ia32_vtestnzcpd (v2df,v2df,ptest) -int __builtin_ia32_vtestnzcpd256 (v4df,v4df,ptest) -int __builtin_ia32_vtestnzcps (v4sf,v4sf,ptest) -int __builtin_ia32_vtestnzcps256 (v8sf,v8sf,ptest) -int __builtin_ia32_vtestzpd (v2df,v2df,ptest) -int __builtin_ia32_vtestzpd256 (v4df,v4df,ptest) -int __builtin_ia32_vtestzps (v4sf,v4sf,ptest) -int __builtin_ia32_vtestzps256 (v8sf,v8sf,ptest) -void __builtin_ia32_vzeroall (void) -void __builtin_ia32_vzeroupper (void) -v4df __builtin_ia32_xorpd256 (v4df,v4df) -v8sf __builtin_ia32_xorps256 (v8sf,v8sf) +v4df __builtin_ia32_addpd256 (v4df,v4df); +v8sf __builtin_ia32_addps256 (v8sf,v8sf); +v4df __builtin_ia32_addsubpd256 (v4df,v4df); +v8sf __builtin_ia32_addsubps256 (v8sf,v8sf); +v4df __builtin_ia32_andnpd256 (v4df,v4df); +v8sf __builtin_ia32_andnps256 (v8sf,v8sf); +v4df __builtin_ia32_andpd256 (v4df,v4df); +v8sf __builtin_ia32_andps256 (v8sf,v8sf); +v4df __builtin_ia32_blendpd256 (v4df,v4df,int); +v8sf __builtin_ia32_blendps256 (v8sf,v8sf,int); +v4df __builtin_ia32_blendvpd256 (v4df,v4df,v4df); +v8sf __builtin_ia32_blendvps256 (v8sf,v8sf,v8sf); +v2df __builtin_ia32_cmppd (v2df,v2df,int); +v4df __builtin_ia32_cmppd256 (v4df,v4df,int); +v4sf __builtin_ia32_cmpps (v4sf,v4sf,int); +v8sf __builtin_ia32_cmpps256 (v8sf,v8sf,int); +v2df __builtin_ia32_cmpsd (v2df,v2df,int); +v4sf __builtin_ia32_cmpss (v4sf,v4sf,int); +v4df __builtin_ia32_cvtdq2pd256 (v4si); +v8sf __builtin_ia32_cvtdq2ps256 (v8si); +v4si __builtin_ia32_cvtpd2dq256 (v4df); +v4sf __builtin_ia32_cvtpd2ps256 (v4df); +v8si __builtin_ia32_cvtps2dq256 (v8sf); +v4df __builtin_ia32_cvtps2pd256 (v4sf); +v4si __builtin_ia32_cvttpd2dq256 (v4df); +v8si __builtin_ia32_cvttps2dq256 (v8sf); +v4df __builtin_ia32_divpd256 (v4df,v4df); +v8sf __builtin_ia32_divps256 (v8sf,v8sf); +v8sf __builtin_ia32_dpps256 (v8sf,v8sf,int); +v4df __builtin_ia32_haddpd256 (v4df,v4df); 
+v8sf __builtin_ia32_haddps256 (v8sf,v8sf); +v4df __builtin_ia32_hsubpd256 (v4df,v4df); +v8sf __builtin_ia32_hsubps256 (v8sf,v8sf); +v32qi __builtin_ia32_lddqu256 (pcchar); +v32qi __builtin_ia32_loaddqu256 (pcchar); +v4df __builtin_ia32_loadupd256 (pcdouble); +v8sf __builtin_ia32_loadups256 (pcfloat); +v2df __builtin_ia32_maskloadpd (pcv2df,v2df); +v4df __builtin_ia32_maskloadpd256 (pcv4df,v4df); +v4sf __builtin_ia32_maskloadps (pcv4sf,v4sf); +v8sf __builtin_ia32_maskloadps256 (pcv8sf,v8sf); +void __builtin_ia32_maskstorepd (pv2df,v2df,v2df); +void __builtin_ia32_maskstorepd256 (pv4df,v4df,v4df); +void __builtin_ia32_maskstoreps (pv4sf,v4sf,v4sf); +void __builtin_ia32_maskstoreps256 (pv8sf,v8sf,v8sf); +v4df __builtin_ia32_maxpd256 (v4df,v4df); +v8sf __builtin_ia32_maxps256 (v8sf,v8sf); +v4df __builtin_ia32_minpd256 (v4df,v4df); +v8sf __builtin_ia32_minps256 (v8sf,v8sf); +v4df __builtin_ia32_movddup256 (v4df); +int __builtin_ia32_movmskpd256 (v4df); +int __builtin_ia32_movmskps256 (v8sf); +v8sf __builtin_ia32_movshdup256 (v8sf); +v8sf __builtin_ia32_movsldup256 (v8sf); +v4df __builtin_ia32_mulpd256 (v4df,v4df); +v8sf __builtin_ia32_mulps256 (v8sf,v8sf); +v4df __builtin_ia32_orpd256 (v4df,v4df); +v8sf __builtin_ia32_orps256 (v8sf,v8sf); +v2df __builtin_ia32_pd_pd256 (v4df); +v4df __builtin_ia32_pd256_pd (v2df); +v4sf __builtin_ia32_ps_ps256 (v8sf); +v8sf __builtin_ia32_ps256_ps (v4sf); +int __builtin_ia32_ptestc256 (v4di,v4di,ptest); +int __builtin_ia32_ptestnzc256 (v4di,v4di,ptest); +int __builtin_ia32_ptestz256 (v4di,v4di,ptest); +v8sf __builtin_ia32_rcpps256 (v8sf); +v4df __builtin_ia32_roundpd256 (v4df,int); +v8sf __builtin_ia32_roundps256 (v8sf,int); +v8sf __builtin_ia32_rsqrtps_nr256 (v8sf); +v8sf __builtin_ia32_rsqrtps256 (v8sf); +v4df __builtin_ia32_shufpd256 (v4df,v4df,int); +v8sf __builtin_ia32_shufps256 (v8sf,v8sf,int); +v4si __builtin_ia32_si_si256 (v8si); +v8si __builtin_ia32_si256_si (v4si); +v4df __builtin_ia32_sqrtpd256 (v4df); +v8sf __builtin_ia32_sqrtps_nr256 (v8sf); +v8sf __builtin_ia32_sqrtps256 (v8sf); +void __builtin_ia32_storedqu256 (pchar,v32qi); +void __builtin_ia32_storeupd256 (pdouble,v4df); +void __builtin_ia32_storeups256 (pfloat,v8sf); +v4df __builtin_ia32_subpd256 (v4df,v4df); +v8sf __builtin_ia32_subps256 (v8sf,v8sf); +v4df __builtin_ia32_unpckhpd256 (v4df,v4df); +v8sf __builtin_ia32_unpckhps256 (v8sf,v8sf); +v4df __builtin_ia32_unpcklpd256 (v4df,v4df); +v8sf __builtin_ia32_unpcklps256 (v8sf,v8sf); +v4df __builtin_ia32_vbroadcastf128_pd256 (pcv2df); +v8sf __builtin_ia32_vbroadcastf128_ps256 (pcv4sf); +v4df __builtin_ia32_vbroadcastsd256 (pcdouble); +v4sf __builtin_ia32_vbroadcastss (pcfloat); +v8sf __builtin_ia32_vbroadcastss256 (pcfloat); +v2df __builtin_ia32_vextractf128_pd256 (v4df,int); +v4sf __builtin_ia32_vextractf128_ps256 (v8sf,int); +v4si __builtin_ia32_vextractf128_si256 (v8si,int); +v4df __builtin_ia32_vinsertf128_pd256 (v4df,v2df,int); +v8sf __builtin_ia32_vinsertf128_ps256 (v8sf,v4sf,int); +v8si __builtin_ia32_vinsertf128_si256 (v8si,v4si,int); +v4df __builtin_ia32_vperm2f128_pd256 (v4df,v4df,int); +v8sf __builtin_ia32_vperm2f128_ps256 (v8sf,v8sf,int); +v8si __builtin_ia32_vperm2f128_si256 (v8si,v8si,int); +v2df __builtin_ia32_vpermil2pd (v2df,v2df,v2di,int); +v4df __builtin_ia32_vpermil2pd256 (v4df,v4df,v4di,int); +v4sf __builtin_ia32_vpermil2ps (v4sf,v4sf,v4si,int); +v8sf __builtin_ia32_vpermil2ps256 (v8sf,v8sf,v8si,int); +v2df __builtin_ia32_vpermilpd (v2df,int); +v4df __builtin_ia32_vpermilpd256 (v4df,int); +v4sf __builtin_ia32_vpermilps 
(v4sf,int); +v8sf __builtin_ia32_vpermilps256 (v8sf,int); +v2df __builtin_ia32_vpermilvarpd (v2df,v2di); +v4df __builtin_ia32_vpermilvarpd256 (v4df,v4di); +v4sf __builtin_ia32_vpermilvarps (v4sf,v4si); +v8sf __builtin_ia32_vpermilvarps256 (v8sf,v8si); +int __builtin_ia32_vtestcpd (v2df,v2df,ptest); +int __builtin_ia32_vtestcpd256 (v4df,v4df,ptest); +int __builtin_ia32_vtestcps (v4sf,v4sf,ptest); +int __builtin_ia32_vtestcps256 (v8sf,v8sf,ptest); +int __builtin_ia32_vtestnzcpd (v2df,v2df,ptest); +int __builtin_ia32_vtestnzcpd256 (v4df,v4df,ptest); +int __builtin_ia32_vtestnzcps (v4sf,v4sf,ptest); +int __builtin_ia32_vtestnzcps256 (v8sf,v8sf,ptest); +int __builtin_ia32_vtestzpd (v2df,v2df,ptest); +int __builtin_ia32_vtestzpd256 (v4df,v4df,ptest); +int __builtin_ia32_vtestzps (v4sf,v4sf,ptest); +int __builtin_ia32_vtestzps256 (v8sf,v8sf,ptest); +void __builtin_ia32_vzeroall (void); +void __builtin_ia32_vzeroupper (void); +v4df __builtin_ia32_xorpd256 (v4df,v4df); +v8sf __builtin_ia32_xorps256 (v8sf,v8sf); @end smallexample The following built-in functions are available when @option{-mavx2} is @@ -22572,177 +22565,177 @@ used. All of them generate the machine instruction that is part of the name. @smallexample -v32qi __builtin_ia32_mpsadbw256 (v32qi,v32qi,int) -v32qi __builtin_ia32_pabsb256 (v32qi) -v16hi __builtin_ia32_pabsw256 (v16hi) -v8si __builtin_ia32_pabsd256 (v8si) -v16hi __builtin_ia32_packssdw256 (v8si,v8si) -v32qi __builtin_ia32_packsswb256 (v16hi,v16hi) -v16hi __builtin_ia32_packusdw256 (v8si,v8si) -v32qi __builtin_ia32_packuswb256 (v16hi,v16hi) -v32qi __builtin_ia32_paddb256 (v32qi,v32qi) -v16hi __builtin_ia32_paddw256 (v16hi,v16hi) -v8si __builtin_ia32_paddd256 (v8si,v8si) -v4di __builtin_ia32_paddq256 (v4di,v4di) -v32qi __builtin_ia32_paddsb256 (v32qi,v32qi) -v16hi __builtin_ia32_paddsw256 (v16hi,v16hi) -v32qi __builtin_ia32_paddusb256 (v32qi,v32qi) -v16hi __builtin_ia32_paddusw256 (v16hi,v16hi) -v4di __builtin_ia32_palignr256 (v4di,v4di,int) -v4di __builtin_ia32_andsi256 (v4di,v4di) -v4di __builtin_ia32_andnotsi256 (v4di,v4di) -v32qi __builtin_ia32_pavgb256 (v32qi,v32qi) -v16hi __builtin_ia32_pavgw256 (v16hi,v16hi) -v32qi __builtin_ia32_pblendvb256 (v32qi,v32qi,v32qi) -v16hi __builtin_ia32_pblendw256 (v16hi,v16hi,int) -v32qi __builtin_ia32_pcmpeqb256 (v32qi,v32qi) -v16hi __builtin_ia32_pcmpeqw256 (v16hi,v16hi) -v8si __builtin_ia32_pcmpeqd256 (c8si,v8si) -v4di __builtin_ia32_pcmpeqq256 (v4di,v4di) -v32qi __builtin_ia32_pcmpgtb256 (v32qi,v32qi) -v16hi __builtin_ia32_pcmpgtw256 (16hi,v16hi) -v8si __builtin_ia32_pcmpgtd256 (v8si,v8si) -v4di __builtin_ia32_pcmpgtq256 (v4di,v4di) -v16hi __builtin_ia32_phaddw256 (v16hi,v16hi) -v8si __builtin_ia32_phaddd256 (v8si,v8si) -v16hi __builtin_ia32_phaddsw256 (v16hi,v16hi) -v16hi __builtin_ia32_phsubw256 (v16hi,v16hi) -v8si __builtin_ia32_phsubd256 (v8si,v8si) -v16hi __builtin_ia32_phsubsw256 (v16hi,v16hi) -v32qi __builtin_ia32_pmaddubsw256 (v32qi,v32qi) -v16hi __builtin_ia32_pmaddwd256 (v16hi,v16hi) -v32qi __builtin_ia32_pmaxsb256 (v32qi,v32qi) -v16hi __builtin_ia32_pmaxsw256 (v16hi,v16hi) -v8si __builtin_ia32_pmaxsd256 (v8si,v8si) -v32qi __builtin_ia32_pmaxub256 (v32qi,v32qi) -v16hi __builtin_ia32_pmaxuw256 (v16hi,v16hi) -v8si __builtin_ia32_pmaxud256 (v8si,v8si) -v32qi __builtin_ia32_pminsb256 (v32qi,v32qi) -v16hi __builtin_ia32_pminsw256 (v16hi,v16hi) -v8si __builtin_ia32_pminsd256 (v8si,v8si) -v32qi __builtin_ia32_pminub256 (v32qi,v32qi) -v16hi __builtin_ia32_pminuw256 (v16hi,v16hi) -v8si __builtin_ia32_pminud256 (v8si,v8si) -int 
__builtin_ia32_pmovmskb256 (v32qi) -v16hi __builtin_ia32_pmovsxbw256 (v16qi) -v8si __builtin_ia32_pmovsxbd256 (v16qi) -v4di __builtin_ia32_pmovsxbq256 (v16qi) -v8si __builtin_ia32_pmovsxwd256 (v8hi) -v4di __builtin_ia32_pmovsxwq256 (v8hi) -v4di __builtin_ia32_pmovsxdq256 (v4si) -v16hi __builtin_ia32_pmovzxbw256 (v16qi) -v8si __builtin_ia32_pmovzxbd256 (v16qi) -v4di __builtin_ia32_pmovzxbq256 (v16qi) -v8si __builtin_ia32_pmovzxwd256 (v8hi) -v4di __builtin_ia32_pmovzxwq256 (v8hi) -v4di __builtin_ia32_pmovzxdq256 (v4si) -v4di __builtin_ia32_pmuldq256 (v8si,v8si) -v16hi __builtin_ia32_pmulhrsw256 (v16hi, v16hi) -v16hi __builtin_ia32_pmulhuw256 (v16hi,v16hi) -v16hi __builtin_ia32_pmulhw256 (v16hi,v16hi) -v16hi __builtin_ia32_pmullw256 (v16hi,v16hi) -v8si __builtin_ia32_pmulld256 (v8si,v8si) -v4di __builtin_ia32_pmuludq256 (v8si,v8si) -v4di __builtin_ia32_por256 (v4di,v4di) -v16hi __builtin_ia32_psadbw256 (v32qi,v32qi) -v32qi __builtin_ia32_pshufb256 (v32qi,v32qi) -v8si __builtin_ia32_pshufd256 (v8si,int) -v16hi __builtin_ia32_pshufhw256 (v16hi,int) -v16hi __builtin_ia32_pshuflw256 (v16hi,int) -v32qi __builtin_ia32_psignb256 (v32qi,v32qi) -v16hi __builtin_ia32_psignw256 (v16hi,v16hi) -v8si __builtin_ia32_psignd256 (v8si,v8si) -v4di __builtin_ia32_pslldqi256 (v4di,int) -v16hi __builtin_ia32_psllwi256 (16hi,int) -v16hi __builtin_ia32_psllw256(v16hi,v8hi) -v8si __builtin_ia32_pslldi256 (v8si,int) -v8si __builtin_ia32_pslld256(v8si,v4si) -v4di __builtin_ia32_psllqi256 (v4di,int) -v4di __builtin_ia32_psllq256(v4di,v2di) -v16hi __builtin_ia32_psrawi256 (v16hi,int) -v16hi __builtin_ia32_psraw256 (v16hi,v8hi) -v8si __builtin_ia32_psradi256 (v8si,int) -v8si __builtin_ia32_psrad256 (v8si,v4si) -v4di __builtin_ia32_psrldqi256 (v4di, int) -v16hi __builtin_ia32_psrlwi256 (v16hi,int) -v16hi __builtin_ia32_psrlw256 (v16hi,v8hi) -v8si __builtin_ia32_psrldi256 (v8si,int) -v8si __builtin_ia32_psrld256 (v8si,v4si) -v4di __builtin_ia32_psrlqi256 (v4di,int) -v4di __builtin_ia32_psrlq256(v4di,v2di) -v32qi __builtin_ia32_psubb256 (v32qi,v32qi) -v32hi __builtin_ia32_psubw256 (v16hi,v16hi) -v8si __builtin_ia32_psubd256 (v8si,v8si) -v4di __builtin_ia32_psubq256 (v4di,v4di) -v32qi __builtin_ia32_psubsb256 (v32qi,v32qi) -v16hi __builtin_ia32_psubsw256 (v16hi,v16hi) -v32qi __builtin_ia32_psubusb256 (v32qi,v32qi) -v16hi __builtin_ia32_psubusw256 (v16hi,v16hi) -v32qi __builtin_ia32_punpckhbw256 (v32qi,v32qi) -v16hi __builtin_ia32_punpckhwd256 (v16hi,v16hi) -v8si __builtin_ia32_punpckhdq256 (v8si,v8si) -v4di __builtin_ia32_punpckhqdq256 (v4di,v4di) -v32qi __builtin_ia32_punpcklbw256 (v32qi,v32qi) -v16hi __builtin_ia32_punpcklwd256 (v16hi,v16hi) -v8si __builtin_ia32_punpckldq256 (v8si,v8si) -v4di __builtin_ia32_punpcklqdq256 (v4di,v4di) -v4di __builtin_ia32_pxor256 (v4di,v4di) -v4di __builtin_ia32_movntdqa256 (pv4di) -v4sf __builtin_ia32_vbroadcastss_ps (v4sf) -v8sf __builtin_ia32_vbroadcastss_ps256 (v4sf) -v4df __builtin_ia32_vbroadcastsd_pd256 (v2df) -v4di __builtin_ia32_vbroadcastsi256 (v2di) -v4si __builtin_ia32_pblendd128 (v4si,v4si) -v8si __builtin_ia32_pblendd256 (v8si,v8si) -v32qi __builtin_ia32_pbroadcastb256 (v16qi) -v16hi __builtin_ia32_pbroadcastw256 (v8hi) -v8si __builtin_ia32_pbroadcastd256 (v4si) -v4di __builtin_ia32_pbroadcastq256 (v2di) -v16qi __builtin_ia32_pbroadcastb128 (v16qi) -v8hi __builtin_ia32_pbroadcastw128 (v8hi) -v4si __builtin_ia32_pbroadcastd128 (v4si) -v2di __builtin_ia32_pbroadcastq128 (v2di) -v8si __builtin_ia32_permvarsi256 (v8si,v8si) -v4df __builtin_ia32_permdf256 (v4df,int) -v8sf 
__builtin_ia32_permvarsf256 (v8sf,v8sf) -v4di __builtin_ia32_permdi256 (v4di,int) -v4di __builtin_ia32_permti256 (v4di,v4di,int) -v4di __builtin_ia32_extract128i256 (v4di,int) -v4di __builtin_ia32_insert128i256 (v4di,v2di,int) -v8si __builtin_ia32_maskloadd256 (pcv8si,v8si) -v4di __builtin_ia32_maskloadq256 (pcv4di,v4di) -v4si __builtin_ia32_maskloadd (pcv4si,v4si) -v2di __builtin_ia32_maskloadq (pcv2di,v2di) -void __builtin_ia32_maskstored256 (pv8si,v8si,v8si) -void __builtin_ia32_maskstoreq256 (pv4di,v4di,v4di) -void __builtin_ia32_maskstored (pv4si,v4si,v4si) -void __builtin_ia32_maskstoreq (pv2di,v2di,v2di) -v8si __builtin_ia32_psllv8si (v8si,v8si) -v4si __builtin_ia32_psllv4si (v4si,v4si) -v4di __builtin_ia32_psllv4di (v4di,v4di) -v2di __builtin_ia32_psllv2di (v2di,v2di) -v8si __builtin_ia32_psrav8si (v8si,v8si) -v4si __builtin_ia32_psrav4si (v4si,v4si) -v8si __builtin_ia32_psrlv8si (v8si,v8si) -v4si __builtin_ia32_psrlv4si (v4si,v4si) -v4di __builtin_ia32_psrlv4di (v4di,v4di) -v2di __builtin_ia32_psrlv2di (v2di,v2di) -v2df __builtin_ia32_gathersiv2df (v2df, pcdouble,v4si,v2df,int) -v4df __builtin_ia32_gathersiv4df (v4df, pcdouble,v4si,v4df,int) -v2df __builtin_ia32_gatherdiv2df (v2df, pcdouble,v2di,v2df,int) -v4df __builtin_ia32_gatherdiv4df (v4df, pcdouble,v4di,v4df,int) -v4sf __builtin_ia32_gathersiv4sf (v4sf, pcfloat,v4si,v4sf,int) -v8sf __builtin_ia32_gathersiv8sf (v8sf, pcfloat,v8si,v8sf,int) -v4sf __builtin_ia32_gatherdiv4sf (v4sf, pcfloat,v2di,v4sf,int) -v4sf __builtin_ia32_gatherdiv4sf256 (v4sf, pcfloat,v4di,v4sf,int) -v2di __builtin_ia32_gathersiv2di (v2di, pcint64,v4si,v2di,int) -v4di __builtin_ia32_gathersiv4di (v4di, pcint64,v4si,v4di,int) -v2di __builtin_ia32_gatherdiv2di (v2di, pcint64,v2di,v2di,int) -v4di __builtin_ia32_gatherdiv4di (v4di, pcint64,v4di,v4di,int) -v4si __builtin_ia32_gathersiv4si (v4si, pcint,v4si,v4si,int) -v8si __builtin_ia32_gathersiv8si (v8si, pcint,v8si,v8si,int) -v4si __builtin_ia32_gatherdiv4si (v4si, pcint,v2di,v4si,int) -v4si __builtin_ia32_gatherdiv4si256 (v4si, pcint,v4di,v4si,int) +v32qi __builtin_ia32_mpsadbw256 (v32qi,v32qi,int); +v32qi __builtin_ia32_pabsb256 (v32qi); +v16hi __builtin_ia32_pabsw256 (v16hi); +v8si __builtin_ia32_pabsd256 (v8si); +v16hi __builtin_ia32_packssdw256 (v8si,v8si); +v32qi __builtin_ia32_packsswb256 (v16hi,v16hi); +v16hi __builtin_ia32_packusdw256 (v8si,v8si); +v32qi __builtin_ia32_packuswb256 (v16hi,v16hi); +v32qi __builtin_ia32_paddb256 (v32qi,v32qi); +v16hi __builtin_ia32_paddw256 (v16hi,v16hi); +v8si __builtin_ia32_paddd256 (v8si,v8si); +v4di __builtin_ia32_paddq256 (v4di,v4di); +v32qi __builtin_ia32_paddsb256 (v32qi,v32qi); +v16hi __builtin_ia32_paddsw256 (v16hi,v16hi); +v32qi __builtin_ia32_paddusb256 (v32qi,v32qi); +v16hi __builtin_ia32_paddusw256 (v16hi,v16hi); +v4di __builtin_ia32_palignr256 (v4di,v4di,int); +v4di __builtin_ia32_andsi256 (v4di,v4di); +v4di __builtin_ia32_andnotsi256 (v4di,v4di); +v32qi __builtin_ia32_pavgb256 (v32qi,v32qi); +v16hi __builtin_ia32_pavgw256 (v16hi,v16hi); +v32qi __builtin_ia32_pblendvb256 (v32qi,v32qi,v32qi); +v16hi __builtin_ia32_pblendw256 (v16hi,v16hi,int); +v32qi __builtin_ia32_pcmpeqb256 (v32qi,v32qi); +v16hi __builtin_ia32_pcmpeqw256 (v16hi,v16hi); +v8si __builtin_ia32_pcmpeqd256 (c8si,v8si); +v4di __builtin_ia32_pcmpeqq256 (v4di,v4di); +v32qi __builtin_ia32_pcmpgtb256 (v32qi,v32qi); +v16hi __builtin_ia32_pcmpgtw256 (16hi,v16hi); +v8si __builtin_ia32_pcmpgtd256 (v8si,v8si); +v4di __builtin_ia32_pcmpgtq256 (v4di,v4di); +v16hi __builtin_ia32_phaddw256 (v16hi,v16hi); 
+v8si __builtin_ia32_phaddd256 (v8si,v8si); +v16hi __builtin_ia32_phaddsw256 (v16hi,v16hi); +v16hi __builtin_ia32_phsubw256 (v16hi,v16hi); +v8si __builtin_ia32_phsubd256 (v8si,v8si); +v16hi __builtin_ia32_phsubsw256 (v16hi,v16hi); +v32qi __builtin_ia32_pmaddubsw256 (v32qi,v32qi); +v16hi __builtin_ia32_pmaddwd256 (v16hi,v16hi); +v32qi __builtin_ia32_pmaxsb256 (v32qi,v32qi); +v16hi __builtin_ia32_pmaxsw256 (v16hi,v16hi); +v8si __builtin_ia32_pmaxsd256 (v8si,v8si); +v32qi __builtin_ia32_pmaxub256 (v32qi,v32qi); +v16hi __builtin_ia32_pmaxuw256 (v16hi,v16hi); +v8si __builtin_ia32_pmaxud256 (v8si,v8si); +v32qi __builtin_ia32_pminsb256 (v32qi,v32qi); +v16hi __builtin_ia32_pminsw256 (v16hi,v16hi); +v8si __builtin_ia32_pminsd256 (v8si,v8si); +v32qi __builtin_ia32_pminub256 (v32qi,v32qi); +v16hi __builtin_ia32_pminuw256 (v16hi,v16hi); +v8si __builtin_ia32_pminud256 (v8si,v8si); +int __builtin_ia32_pmovmskb256 (v32qi); +v16hi __builtin_ia32_pmovsxbw256 (v16qi); +v8si __builtin_ia32_pmovsxbd256 (v16qi); +v4di __builtin_ia32_pmovsxbq256 (v16qi); +v8si __builtin_ia32_pmovsxwd256 (v8hi); +v4di __builtin_ia32_pmovsxwq256 (v8hi); +v4di __builtin_ia32_pmovsxdq256 (v4si); +v16hi __builtin_ia32_pmovzxbw256 (v16qi); +v8si __builtin_ia32_pmovzxbd256 (v16qi); +v4di __builtin_ia32_pmovzxbq256 (v16qi); +v8si __builtin_ia32_pmovzxwd256 (v8hi); +v4di __builtin_ia32_pmovzxwq256 (v8hi); +v4di __builtin_ia32_pmovzxdq256 (v4si); +v4di __builtin_ia32_pmuldq256 (v8si,v8si); +v16hi __builtin_ia32_pmulhrsw256 (v16hi, v16hi); +v16hi __builtin_ia32_pmulhuw256 (v16hi,v16hi); +v16hi __builtin_ia32_pmulhw256 (v16hi,v16hi); +v16hi __builtin_ia32_pmullw256 (v16hi,v16hi); +v8si __builtin_ia32_pmulld256 (v8si,v8si); +v4di __builtin_ia32_pmuludq256 (v8si,v8si); +v4di __builtin_ia32_por256 (v4di,v4di); +v16hi __builtin_ia32_psadbw256 (v32qi,v32qi); +v32qi __builtin_ia32_pshufb256 (v32qi,v32qi); +v8si __builtin_ia32_pshufd256 (v8si,int); +v16hi __builtin_ia32_pshufhw256 (v16hi,int); +v16hi __builtin_ia32_pshuflw256 (v16hi,int); +v32qi __builtin_ia32_psignb256 (v32qi,v32qi); +v16hi __builtin_ia32_psignw256 (v16hi,v16hi); +v8si __builtin_ia32_psignd256 (v8si,v8si); +v4di __builtin_ia32_pslldqi256 (v4di,int); +v16hi __builtin_ia32_psllwi256 (16hi,int); +v16hi __builtin_ia32_psllw256(v16hi,v8hi); +v8si __builtin_ia32_pslldi256 (v8si,int); +v8si __builtin_ia32_pslld256(v8si,v4si); +v4di __builtin_ia32_psllqi256 (v4di,int); +v4di __builtin_ia32_psllq256(v4di,v2di); +v16hi __builtin_ia32_psrawi256 (v16hi,int); +v16hi __builtin_ia32_psraw256 (v16hi,v8hi); +v8si __builtin_ia32_psradi256 (v8si,int); +v8si __builtin_ia32_psrad256 (v8si,v4si); +v4di __builtin_ia32_psrldqi256 (v4di, int); +v16hi __builtin_ia32_psrlwi256 (v16hi,int); +v16hi __builtin_ia32_psrlw256 (v16hi,v8hi); +v8si __builtin_ia32_psrldi256 (v8si,int); +v8si __builtin_ia32_psrld256 (v8si,v4si); +v4di __builtin_ia32_psrlqi256 (v4di,int); +v4di __builtin_ia32_psrlq256(v4di,v2di); +v32qi __builtin_ia32_psubb256 (v32qi,v32qi); +v32hi __builtin_ia32_psubw256 (v16hi,v16hi); +v8si __builtin_ia32_psubd256 (v8si,v8si); +v4di __builtin_ia32_psubq256 (v4di,v4di); +v32qi __builtin_ia32_psubsb256 (v32qi,v32qi); +v16hi __builtin_ia32_psubsw256 (v16hi,v16hi); +v32qi __builtin_ia32_psubusb256 (v32qi,v32qi); +v16hi __builtin_ia32_psubusw256 (v16hi,v16hi); +v32qi __builtin_ia32_punpckhbw256 (v32qi,v32qi); +v16hi __builtin_ia32_punpckhwd256 (v16hi,v16hi); +v8si __builtin_ia32_punpckhdq256 (v8si,v8si); +v4di __builtin_ia32_punpckhqdq256 (v4di,v4di); +v32qi __builtin_ia32_punpcklbw256 (v32qi,v32qi); 
+v16hi __builtin_ia32_punpcklwd256 (v16hi,v16hi); +v8si __builtin_ia32_punpckldq256 (v8si,v8si); +v4di __builtin_ia32_punpcklqdq256 (v4di,v4di); +v4di __builtin_ia32_pxor256 (v4di,v4di); +v4di __builtin_ia32_movntdqa256 (pv4di); +v4sf __builtin_ia32_vbroadcastss_ps (v4sf); +v8sf __builtin_ia32_vbroadcastss_ps256 (v4sf); +v4df __builtin_ia32_vbroadcastsd_pd256 (v2df); +v4di __builtin_ia32_vbroadcastsi256 (v2di); +v4si __builtin_ia32_pblendd128 (v4si,v4si); +v8si __builtin_ia32_pblendd256 (v8si,v8si); +v32qi __builtin_ia32_pbroadcastb256 (v16qi); +v16hi __builtin_ia32_pbroadcastw256 (v8hi); +v8si __builtin_ia32_pbroadcastd256 (v4si); +v4di __builtin_ia32_pbroadcastq256 (v2di); +v16qi __builtin_ia32_pbroadcastb128 (v16qi); +v8hi __builtin_ia32_pbroadcastw128 (v8hi); +v4si __builtin_ia32_pbroadcastd128 (v4si); +v2di __builtin_ia32_pbroadcastq128 (v2di); +v8si __builtin_ia32_permvarsi256 (v8si,v8si); +v4df __builtin_ia32_permdf256 (v4df,int); +v8sf __builtin_ia32_permvarsf256 (v8sf,v8sf); +v4di __builtin_ia32_permdi256 (v4di,int); +v4di __builtin_ia32_permti256 (v4di,v4di,int); +v4di __builtin_ia32_extract128i256 (v4di,int); +v4di __builtin_ia32_insert128i256 (v4di,v2di,int); +v8si __builtin_ia32_maskloadd256 (pcv8si,v8si); +v4di __builtin_ia32_maskloadq256 (pcv4di,v4di); +v4si __builtin_ia32_maskloadd (pcv4si,v4si); +v2di __builtin_ia32_maskloadq (pcv2di,v2di); +void __builtin_ia32_maskstored256 (pv8si,v8si,v8si); +void __builtin_ia32_maskstoreq256 (pv4di,v4di,v4di); +void __builtin_ia32_maskstored (pv4si,v4si,v4si); +void __builtin_ia32_maskstoreq (pv2di,v2di,v2di); +v8si __builtin_ia32_psllv8si (v8si,v8si); +v4si __builtin_ia32_psllv4si (v4si,v4si); +v4di __builtin_ia32_psllv4di (v4di,v4di); +v2di __builtin_ia32_psllv2di (v2di,v2di); +v8si __builtin_ia32_psrav8si (v8si,v8si); +v4si __builtin_ia32_psrav4si (v4si,v4si); +v8si __builtin_ia32_psrlv8si (v8si,v8si); +v4si __builtin_ia32_psrlv4si (v4si,v4si); +v4di __builtin_ia32_psrlv4di (v4di,v4di); +v2di __builtin_ia32_psrlv2di (v2di,v2di); +v2df __builtin_ia32_gathersiv2df (v2df, pcdouble,v4si,v2df,int); +v4df __builtin_ia32_gathersiv4df (v4df, pcdouble,v4si,v4df,int); +v2df __builtin_ia32_gatherdiv2df (v2df, pcdouble,v2di,v2df,int); +v4df __builtin_ia32_gatherdiv4df (v4df, pcdouble,v4di,v4df,int); +v4sf __builtin_ia32_gathersiv4sf (v4sf, pcfloat,v4si,v4sf,int); +v8sf __builtin_ia32_gathersiv8sf (v8sf, pcfloat,v8si,v8sf,int); +v4sf __builtin_ia32_gatherdiv4sf (v4sf, pcfloat,v2di,v4sf,int); +v4sf __builtin_ia32_gatherdiv4sf256 (v4sf, pcfloat,v4di,v4sf,int); +v2di __builtin_ia32_gathersiv2di (v2di, pcint64,v4si,v2di,int); +v4di __builtin_ia32_gathersiv4di (v4di, pcint64,v4si,v4di,int); +v2di __builtin_ia32_gatherdiv2di (v2di, pcint64,v2di,v2di,int); +v4di __builtin_ia32_gatherdiv4di (v4di, pcint64,v4di,v4di,int); +v4si __builtin_ia32_gathersiv4si (v4si, pcint,v4si,v4si,int); +v8si __builtin_ia32_gathersiv8si (v8si, pcint,v8si,v8si,int); +v4si __builtin_ia32_gatherdiv4si (v4si, pcint,v2di,v4si,int); +v4si __builtin_ia32_gatherdiv4si256 (v4si, pcint,v4di,v4si,int); @end smallexample The following built-in functions are available when @option{-maes} is @@ -22750,12 +22743,12 @@ used. All of them generate the machine instruction that is part of the name. 
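As a quick illustration of how the AVX2 built-ins listed above are normally reached in user code (a minimal sketch, not part of this patch): the @code{__builtin_ia32_*} functions are what the @code{<immintrin.h>} intrinsics ultimately expand to, so portable code usually calls the wrappers and compiles with @option{-mavx2}.

/* Minimal sketch: add eight 32-bit ints using the <immintrin.h> wrappers
   that expand to the built-ins above.  Compile with -mavx2.  */
#include <immintrin.h>

void
add8 (int *dst, const int *a, const int *b)
{
  __m256i va = _mm256_loadu_si256 ((const __m256i *) a);
  __m256i vb = _mm256_loadu_si256 ((const __m256i *) b);
  _mm256_storeu_si256 ((__m256i *) dst, _mm256_add_epi32 (va, vb));
}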
@smallexample -v2di __builtin_ia32_aesenc128 (v2di, v2di) -v2di __builtin_ia32_aesenclast128 (v2di, v2di) -v2di __builtin_ia32_aesdec128 (v2di, v2di) -v2di __builtin_ia32_aesdeclast128 (v2di, v2di) -v2di __builtin_ia32_aeskeygenassist128 (v2di, const int) -v2di __builtin_ia32_aesimc128 (v2di) +v2di __builtin_ia32_aesenc128 (v2di, v2di); +v2di __builtin_ia32_aesenclast128 (v2di, v2di); +v2di __builtin_ia32_aesdec128 (v2di, v2di); +v2di __builtin_ia32_aesdeclast128 (v2di, v2di); +v2di __builtin_ia32_aeskeygenassist128 (v2di, const int); +v2di __builtin_ia32_aesimc128 (v2di); @end smallexample The following built-in function is available when @option{-mpclmul} is @@ -22771,14 +22764,14 @@ used. All of them generate the machine instruction that is part of the name. @smallexample -unsigned int __builtin_ia32_rdfsbase32 (void) -unsigned long long __builtin_ia32_rdfsbase64 (void) -unsigned int __builtin_ia32_rdgsbase32 (void) -unsigned long long __builtin_ia32_rdgsbase64 (void) -void _writefsbase_u32 (unsigned int) -void _writefsbase_u64 (unsigned long long) -void _writegsbase_u32 (unsigned int) -void _writegsbase_u64 (unsigned long long) +unsigned int __builtin_ia32_rdfsbase32 (void); +unsigned long long __builtin_ia32_rdfsbase64 (void); +unsigned int __builtin_ia32_rdgsbase32 (void); +unsigned long long __builtin_ia32_rdgsbase64 (void); +void _writefsbase_u32 (unsigned int); +void _writefsbase_u64 (unsigned long long); +void _writegsbase_u32 (unsigned int); +void _writegsbase_u64 (unsigned long long); @end smallexample The following built-in function is available when @option{-mrdrnd} is @@ -22786,9 +22779,9 @@ used. All of them generate the machine instruction that is part of the name. @smallexample -unsigned int __builtin_ia32_rdrand16_step (unsigned short *) -unsigned int __builtin_ia32_rdrand32_step (unsigned int *) -unsigned int __builtin_ia32_rdrand64_step (unsigned long long *) +unsigned int __builtin_ia32_rdrand16_step (unsigned short *); +unsigned int __builtin_ia32_rdrand32_step (unsigned int *); +unsigned int __builtin_ia32_rdrand64_step (unsigned long long *); @end smallexample The following built-in function is available when @option{-mptwrite} is @@ -22796,186 +22789,186 @@ used. All of them generate the machine instruction that is part of the name. @smallexample -void __builtin_ia32_ptwrite32 (unsigned) -void __builtin_ia32_ptwrite64 (unsigned long long) +void __builtin_ia32_ptwrite32 (unsigned); +void __builtin_ia32_ptwrite64 (unsigned long long); @end smallexample The following built-in functions are available when @option{-msse4a} is used. All of them generate the machine instruction that is part of the name. @smallexample -void __builtin_ia32_movntsd (double *, v2df) -void __builtin_ia32_movntss (float *, v4sf) -v2di __builtin_ia32_extrq (v2di, v16qi) -v2di __builtin_ia32_extrqi (v2di, const unsigned int, const unsigned int) -v2di __builtin_ia32_insertq (v2di, v2di) -v2di __builtin_ia32_insertqi (v2di, v2di, const unsigned int, const unsigned int) +void __builtin_ia32_movntsd (double *, v2df); +void __builtin_ia32_movntss (float *, v4sf); +v2di __builtin_ia32_extrq (v2di, v16qi); +v2di __builtin_ia32_extrqi (v2di, const unsigned int, const unsigned int); +v2di __builtin_ia32_insertq (v2di, v2di); +v2di __builtin_ia32_insertqi (v2di, v2di, const unsigned int, const unsigned int); @end smallexample The following built-in functions are available when @option{-mxop} is used. 
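A usage note on the @code{__builtin_ia32_rdrand*_step} built-ins shown a little earlier (a minimal sketch, not part of this patch): each one stores a hardware random value through its pointer argument and returns nonzero only when the instruction succeeded, so callers are expected to retry on failure.

/* Minimal sketch: retry RDRAND a few times before giving up.
   Compile with -mrdrnd.  */
int
get_random_u32 (unsigned int *out)
{
  for (int tries = 0; tries < 10; tries++)
    if (__builtin_ia32_rdrand32_step (out))
      return 1;   /* value stored in *out */
  return 0;       /* hardware returned no entropy; caller must handle it */
}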
@smallexample -v2df __builtin_ia32_vfrczpd (v2df) -v4sf __builtin_ia32_vfrczps (v4sf) -v2df __builtin_ia32_vfrczsd (v2df) -v4sf __builtin_ia32_vfrczss (v4sf) -v4df __builtin_ia32_vfrczpd256 (v4df) -v8sf __builtin_ia32_vfrczps256 (v8sf) -v2di __builtin_ia32_vpcmov (v2di, v2di, v2di) -v2di __builtin_ia32_vpcmov_v2di (v2di, v2di, v2di) -v4si __builtin_ia32_vpcmov_v4si (v4si, v4si, v4si) -v8hi __builtin_ia32_vpcmov_v8hi (v8hi, v8hi, v8hi) -v16qi __builtin_ia32_vpcmov_v16qi (v16qi, v16qi, v16qi) -v2df __builtin_ia32_vpcmov_v2df (v2df, v2df, v2df) -v4sf __builtin_ia32_vpcmov_v4sf (v4sf, v4sf, v4sf) -v4di __builtin_ia32_vpcmov_v4di256 (v4di, v4di, v4di) -v8si __builtin_ia32_vpcmov_v8si256 (v8si, v8si, v8si) -v16hi __builtin_ia32_vpcmov_v16hi256 (v16hi, v16hi, v16hi) -v32qi __builtin_ia32_vpcmov_v32qi256 (v32qi, v32qi, v32qi) -v4df __builtin_ia32_vpcmov_v4df256 (v4df, v4df, v4df) -v8sf __builtin_ia32_vpcmov_v8sf256 (v8sf, v8sf, v8sf) -v16qi __builtin_ia32_vpcomeqb (v16qi, v16qi) -v8hi __builtin_ia32_vpcomeqw (v8hi, v8hi) -v4si __builtin_ia32_vpcomeqd (v4si, v4si) -v2di __builtin_ia32_vpcomeqq (v2di, v2di) -v16qi __builtin_ia32_vpcomequb (v16qi, v16qi) -v4si __builtin_ia32_vpcomequd (v4si, v4si) -v2di __builtin_ia32_vpcomequq (v2di, v2di) -v8hi __builtin_ia32_vpcomequw (v8hi, v8hi) -v8hi __builtin_ia32_vpcomeqw (v8hi, v8hi) -v16qi __builtin_ia32_vpcomfalseb (v16qi, v16qi) -v4si __builtin_ia32_vpcomfalsed (v4si, v4si) -v2di __builtin_ia32_vpcomfalseq (v2di, v2di) -v16qi __builtin_ia32_vpcomfalseub (v16qi, v16qi) -v4si __builtin_ia32_vpcomfalseud (v4si, v4si) -v2di __builtin_ia32_vpcomfalseuq (v2di, v2di) -v8hi __builtin_ia32_vpcomfalseuw (v8hi, v8hi) -v8hi __builtin_ia32_vpcomfalsew (v8hi, v8hi) -v16qi __builtin_ia32_vpcomgeb (v16qi, v16qi) -v4si __builtin_ia32_vpcomged (v4si, v4si) -v2di __builtin_ia32_vpcomgeq (v2di, v2di) -v16qi __builtin_ia32_vpcomgeub (v16qi, v16qi) -v4si __builtin_ia32_vpcomgeud (v4si, v4si) -v2di __builtin_ia32_vpcomgeuq (v2di, v2di) -v8hi __builtin_ia32_vpcomgeuw (v8hi, v8hi) -v8hi __builtin_ia32_vpcomgew (v8hi, v8hi) -v16qi __builtin_ia32_vpcomgtb (v16qi, v16qi) -v4si __builtin_ia32_vpcomgtd (v4si, v4si) -v2di __builtin_ia32_vpcomgtq (v2di, v2di) -v16qi __builtin_ia32_vpcomgtub (v16qi, v16qi) -v4si __builtin_ia32_vpcomgtud (v4si, v4si) -v2di __builtin_ia32_vpcomgtuq (v2di, v2di) -v8hi __builtin_ia32_vpcomgtuw (v8hi, v8hi) -v8hi __builtin_ia32_vpcomgtw (v8hi, v8hi) -v16qi __builtin_ia32_vpcomleb (v16qi, v16qi) -v4si __builtin_ia32_vpcomled (v4si, v4si) -v2di __builtin_ia32_vpcomleq (v2di, v2di) -v16qi __builtin_ia32_vpcomleub (v16qi, v16qi) -v4si __builtin_ia32_vpcomleud (v4si, v4si) -v2di __builtin_ia32_vpcomleuq (v2di, v2di) -v8hi __builtin_ia32_vpcomleuw (v8hi, v8hi) -v8hi __builtin_ia32_vpcomlew (v8hi, v8hi) -v16qi __builtin_ia32_vpcomltb (v16qi, v16qi) -v4si __builtin_ia32_vpcomltd (v4si, v4si) -v2di __builtin_ia32_vpcomltq (v2di, v2di) -v16qi __builtin_ia32_vpcomltub (v16qi, v16qi) -v4si __builtin_ia32_vpcomltud (v4si, v4si) -v2di __builtin_ia32_vpcomltuq (v2di, v2di) -v8hi __builtin_ia32_vpcomltuw (v8hi, v8hi) -v8hi __builtin_ia32_vpcomltw (v8hi, v8hi) -v16qi __builtin_ia32_vpcomneb (v16qi, v16qi) -v4si __builtin_ia32_vpcomned (v4si, v4si) -v2di __builtin_ia32_vpcomneq (v2di, v2di) -v16qi __builtin_ia32_vpcomneub (v16qi, v16qi) -v4si __builtin_ia32_vpcomneud (v4si, v4si) -v2di __builtin_ia32_vpcomneuq (v2di, v2di) -v8hi __builtin_ia32_vpcomneuw (v8hi, v8hi) -v8hi __builtin_ia32_vpcomnew (v8hi, v8hi) -v16qi __builtin_ia32_vpcomtrueb (v16qi, v16qi) -v4si 
__builtin_ia32_vpcomtrued (v4si, v4si) -v2di __builtin_ia32_vpcomtrueq (v2di, v2di) -v16qi __builtin_ia32_vpcomtrueub (v16qi, v16qi) -v4si __builtin_ia32_vpcomtrueud (v4si, v4si) -v2di __builtin_ia32_vpcomtrueuq (v2di, v2di) -v8hi __builtin_ia32_vpcomtrueuw (v8hi, v8hi) -v8hi __builtin_ia32_vpcomtruew (v8hi, v8hi) -v4si __builtin_ia32_vphaddbd (v16qi) -v2di __builtin_ia32_vphaddbq (v16qi) -v8hi __builtin_ia32_vphaddbw (v16qi) -v2di __builtin_ia32_vphadddq (v4si) -v4si __builtin_ia32_vphaddubd (v16qi) -v2di __builtin_ia32_vphaddubq (v16qi) -v8hi __builtin_ia32_vphaddubw (v16qi) -v2di __builtin_ia32_vphaddudq (v4si) -v4si __builtin_ia32_vphadduwd (v8hi) -v2di __builtin_ia32_vphadduwq (v8hi) -v4si __builtin_ia32_vphaddwd (v8hi) -v2di __builtin_ia32_vphaddwq (v8hi) -v8hi __builtin_ia32_vphsubbw (v16qi) -v2di __builtin_ia32_vphsubdq (v4si) -v4si __builtin_ia32_vphsubwd (v8hi) -v4si __builtin_ia32_vpmacsdd (v4si, v4si, v4si) -v2di __builtin_ia32_vpmacsdqh (v4si, v4si, v2di) -v2di __builtin_ia32_vpmacsdql (v4si, v4si, v2di) -v4si __builtin_ia32_vpmacssdd (v4si, v4si, v4si) -v2di __builtin_ia32_vpmacssdqh (v4si, v4si, v2di) -v2di __builtin_ia32_vpmacssdql (v4si, v4si, v2di) -v4si __builtin_ia32_vpmacsswd (v8hi, v8hi, v4si) -v8hi __builtin_ia32_vpmacssww (v8hi, v8hi, v8hi) -v4si __builtin_ia32_vpmacswd (v8hi, v8hi, v4si) -v8hi __builtin_ia32_vpmacsww (v8hi, v8hi, v8hi) -v4si __builtin_ia32_vpmadcsswd (v8hi, v8hi, v4si) -v4si __builtin_ia32_vpmadcswd (v8hi, v8hi, v4si) -v16qi __builtin_ia32_vpperm (v16qi, v16qi, v16qi) -v16qi __builtin_ia32_vprotb (v16qi, v16qi) -v4si __builtin_ia32_vprotd (v4si, v4si) -v2di __builtin_ia32_vprotq (v2di, v2di) -v8hi __builtin_ia32_vprotw (v8hi, v8hi) -v16qi __builtin_ia32_vpshab (v16qi, v16qi) -v4si __builtin_ia32_vpshad (v4si, v4si) -v2di __builtin_ia32_vpshaq (v2di, v2di) -v8hi __builtin_ia32_vpshaw (v8hi, v8hi) -v16qi __builtin_ia32_vpshlb (v16qi, v16qi) -v4si __builtin_ia32_vpshld (v4si, v4si) -v2di __builtin_ia32_vpshlq (v2di, v2di) -v8hi __builtin_ia32_vpshlw (v8hi, v8hi) +v2df __builtin_ia32_vfrczpd (v2df); +v4sf __builtin_ia32_vfrczps (v4sf); +v2df __builtin_ia32_vfrczsd (v2df); +v4sf __builtin_ia32_vfrczss (v4sf); +v4df __builtin_ia32_vfrczpd256 (v4df); +v8sf __builtin_ia32_vfrczps256 (v8sf); +v2di __builtin_ia32_vpcmov (v2di, v2di, v2di); +v2di __builtin_ia32_vpcmov_v2di (v2di, v2di, v2di); +v4si __builtin_ia32_vpcmov_v4si (v4si, v4si, v4si); +v8hi __builtin_ia32_vpcmov_v8hi (v8hi, v8hi, v8hi); +v16qi __builtin_ia32_vpcmov_v16qi (v16qi, v16qi, v16qi); +v2df __builtin_ia32_vpcmov_v2df (v2df, v2df, v2df); +v4sf __builtin_ia32_vpcmov_v4sf (v4sf, v4sf, v4sf); +v4di __builtin_ia32_vpcmov_v4di256 (v4di, v4di, v4di); +v8si __builtin_ia32_vpcmov_v8si256 (v8si, v8si, v8si); +v16hi __builtin_ia32_vpcmov_v16hi256 (v16hi, v16hi, v16hi); +v32qi __builtin_ia32_vpcmov_v32qi256 (v32qi, v32qi, v32qi); +v4df __builtin_ia32_vpcmov_v4df256 (v4df, v4df, v4df); +v8sf __builtin_ia32_vpcmov_v8sf256 (v8sf, v8sf, v8sf); +v16qi __builtin_ia32_vpcomeqb (v16qi, v16qi); +v8hi __builtin_ia32_vpcomeqw (v8hi, v8hi); +v4si __builtin_ia32_vpcomeqd (v4si, v4si); +v2di __builtin_ia32_vpcomeqq (v2di, v2di); +v16qi __builtin_ia32_vpcomequb (v16qi, v16qi); +v4si __builtin_ia32_vpcomequd (v4si, v4si); +v2di __builtin_ia32_vpcomequq (v2di, v2di); +v8hi __builtin_ia32_vpcomequw (v8hi, v8hi); +v8hi __builtin_ia32_vpcomeqw (v8hi, v8hi); +v16qi __builtin_ia32_vpcomfalseb (v16qi, v16qi); +v4si __builtin_ia32_vpcomfalsed (v4si, v4si); +v2di __builtin_ia32_vpcomfalseq (v2di, v2di); +v16qi 
__builtin_ia32_vpcomfalseub (v16qi, v16qi); +v4si __builtin_ia32_vpcomfalseud (v4si, v4si); +v2di __builtin_ia32_vpcomfalseuq (v2di, v2di); +v8hi __builtin_ia32_vpcomfalseuw (v8hi, v8hi); +v8hi __builtin_ia32_vpcomfalsew (v8hi, v8hi); +v16qi __builtin_ia32_vpcomgeb (v16qi, v16qi); +v4si __builtin_ia32_vpcomged (v4si, v4si); +v2di __builtin_ia32_vpcomgeq (v2di, v2di); +v16qi __builtin_ia32_vpcomgeub (v16qi, v16qi); +v4si __builtin_ia32_vpcomgeud (v4si, v4si); +v2di __builtin_ia32_vpcomgeuq (v2di, v2di); +v8hi __builtin_ia32_vpcomgeuw (v8hi, v8hi); +v8hi __builtin_ia32_vpcomgew (v8hi, v8hi); +v16qi __builtin_ia32_vpcomgtb (v16qi, v16qi); +v4si __builtin_ia32_vpcomgtd (v4si, v4si); +v2di __builtin_ia32_vpcomgtq (v2di, v2di); +v16qi __builtin_ia32_vpcomgtub (v16qi, v16qi); +v4si __builtin_ia32_vpcomgtud (v4si, v4si); +v2di __builtin_ia32_vpcomgtuq (v2di, v2di); +v8hi __builtin_ia32_vpcomgtuw (v8hi, v8hi); +v8hi __builtin_ia32_vpcomgtw (v8hi, v8hi); +v16qi __builtin_ia32_vpcomleb (v16qi, v16qi); +v4si __builtin_ia32_vpcomled (v4si, v4si); +v2di __builtin_ia32_vpcomleq (v2di, v2di); +v16qi __builtin_ia32_vpcomleub (v16qi, v16qi); +v4si __builtin_ia32_vpcomleud (v4si, v4si); +v2di __builtin_ia32_vpcomleuq (v2di, v2di); +v8hi __builtin_ia32_vpcomleuw (v8hi, v8hi); +v8hi __builtin_ia32_vpcomlew (v8hi, v8hi); +v16qi __builtin_ia32_vpcomltb (v16qi, v16qi); +v4si __builtin_ia32_vpcomltd (v4si, v4si); +v2di __builtin_ia32_vpcomltq (v2di, v2di); +v16qi __builtin_ia32_vpcomltub (v16qi, v16qi); +v4si __builtin_ia32_vpcomltud (v4si, v4si); +v2di __builtin_ia32_vpcomltuq (v2di, v2di); +v8hi __builtin_ia32_vpcomltuw (v8hi, v8hi); +v8hi __builtin_ia32_vpcomltw (v8hi, v8hi); +v16qi __builtin_ia32_vpcomneb (v16qi, v16qi); +v4si __builtin_ia32_vpcomned (v4si, v4si); +v2di __builtin_ia32_vpcomneq (v2di, v2di); +v16qi __builtin_ia32_vpcomneub (v16qi, v16qi); +v4si __builtin_ia32_vpcomneud (v4si, v4si); +v2di __builtin_ia32_vpcomneuq (v2di, v2di); +v8hi __builtin_ia32_vpcomneuw (v8hi, v8hi); +v8hi __builtin_ia32_vpcomnew (v8hi, v8hi); +v16qi __builtin_ia32_vpcomtrueb (v16qi, v16qi); +v4si __builtin_ia32_vpcomtrued (v4si, v4si); +v2di __builtin_ia32_vpcomtrueq (v2di, v2di); +v16qi __builtin_ia32_vpcomtrueub (v16qi, v16qi); +v4si __builtin_ia32_vpcomtrueud (v4si, v4si); +v2di __builtin_ia32_vpcomtrueuq (v2di, v2di); +v8hi __builtin_ia32_vpcomtrueuw (v8hi, v8hi); +v8hi __builtin_ia32_vpcomtruew (v8hi, v8hi); +v4si __builtin_ia32_vphaddbd (v16qi); +v2di __builtin_ia32_vphaddbq (v16qi); +v8hi __builtin_ia32_vphaddbw (v16qi); +v2di __builtin_ia32_vphadddq (v4si); +v4si __builtin_ia32_vphaddubd (v16qi); +v2di __builtin_ia32_vphaddubq (v16qi); +v8hi __builtin_ia32_vphaddubw (v16qi); +v2di __builtin_ia32_vphaddudq (v4si); +v4si __builtin_ia32_vphadduwd (v8hi); +v2di __builtin_ia32_vphadduwq (v8hi); +v4si __builtin_ia32_vphaddwd (v8hi); +v2di __builtin_ia32_vphaddwq (v8hi); +v8hi __builtin_ia32_vphsubbw (v16qi); +v2di __builtin_ia32_vphsubdq (v4si); +v4si __builtin_ia32_vphsubwd (v8hi); +v4si __builtin_ia32_vpmacsdd (v4si, v4si, v4si); +v2di __builtin_ia32_vpmacsdqh (v4si, v4si, v2di); +v2di __builtin_ia32_vpmacsdql (v4si, v4si, v2di); +v4si __builtin_ia32_vpmacssdd (v4si, v4si, v4si); +v2di __builtin_ia32_vpmacssdqh (v4si, v4si, v2di); +v2di __builtin_ia32_vpmacssdql (v4si, v4si, v2di); +v4si __builtin_ia32_vpmacsswd (v8hi, v8hi, v4si); +v8hi __builtin_ia32_vpmacssww (v8hi, v8hi, v8hi); +v4si __builtin_ia32_vpmacswd (v8hi, v8hi, v4si); +v8hi __builtin_ia32_vpmacsww (v8hi, v8hi, v8hi); +v4si __builtin_ia32_vpmadcsswd (v8hi, 
v8hi, v4si); +v4si __builtin_ia32_vpmadcswd (v8hi, v8hi, v4si); +v16qi __builtin_ia32_vpperm (v16qi, v16qi, v16qi); +v16qi __builtin_ia32_vprotb (v16qi, v16qi); +v4si __builtin_ia32_vprotd (v4si, v4si); +v2di __builtin_ia32_vprotq (v2di, v2di); +v8hi __builtin_ia32_vprotw (v8hi, v8hi); +v16qi __builtin_ia32_vpshab (v16qi, v16qi); +v4si __builtin_ia32_vpshad (v4si, v4si); +v2di __builtin_ia32_vpshaq (v2di, v2di); +v8hi __builtin_ia32_vpshaw (v8hi, v8hi); +v16qi __builtin_ia32_vpshlb (v16qi, v16qi); +v4si __builtin_ia32_vpshld (v4si, v4si); +v2di __builtin_ia32_vpshlq (v2di, v2di); +v8hi __builtin_ia32_vpshlw (v8hi, v8hi); @end smallexample The following built-in functions are available when @option{-mfma4} is used. All of them generate the machine instruction that is part of the name. @smallexample -v2df __builtin_ia32_vfmaddpd (v2df, v2df, v2df) -v4sf __builtin_ia32_vfmaddps (v4sf, v4sf, v4sf) -v2df __builtin_ia32_vfmaddsd (v2df, v2df, v2df) -v4sf __builtin_ia32_vfmaddss (v4sf, v4sf, v4sf) -v2df __builtin_ia32_vfmsubpd (v2df, v2df, v2df) -v4sf __builtin_ia32_vfmsubps (v4sf, v4sf, v4sf) -v2df __builtin_ia32_vfmsubsd (v2df, v2df, v2df) -v4sf __builtin_ia32_vfmsubss (v4sf, v4sf, v4sf) -v2df __builtin_ia32_vfnmaddpd (v2df, v2df, v2df) -v4sf __builtin_ia32_vfnmaddps (v4sf, v4sf, v4sf) -v2df __builtin_ia32_vfnmaddsd (v2df, v2df, v2df) -v4sf __builtin_ia32_vfnmaddss (v4sf, v4sf, v4sf) -v2df __builtin_ia32_vfnmsubpd (v2df, v2df, v2df) -v4sf __builtin_ia32_vfnmsubps (v4sf, v4sf, v4sf) -v2df __builtin_ia32_vfnmsubsd (v2df, v2df, v2df) -v4sf __builtin_ia32_vfnmsubss (v4sf, v4sf, v4sf) -v2df __builtin_ia32_vfmaddsubpd (v2df, v2df, v2df) -v4sf __builtin_ia32_vfmaddsubps (v4sf, v4sf, v4sf) -v2df __builtin_ia32_vfmsubaddpd (v2df, v2df, v2df) -v4sf __builtin_ia32_vfmsubaddps (v4sf, v4sf, v4sf) -v4df __builtin_ia32_vfmaddpd256 (v4df, v4df, v4df) -v8sf __builtin_ia32_vfmaddps256 (v8sf, v8sf, v8sf) -v4df __builtin_ia32_vfmsubpd256 (v4df, v4df, v4df) -v8sf __builtin_ia32_vfmsubps256 (v8sf, v8sf, v8sf) -v4df __builtin_ia32_vfnmaddpd256 (v4df, v4df, v4df) -v8sf __builtin_ia32_vfnmaddps256 (v8sf, v8sf, v8sf) -v4df __builtin_ia32_vfnmsubpd256 (v4df, v4df, v4df) -v8sf __builtin_ia32_vfnmsubps256 (v8sf, v8sf, v8sf) -v4df __builtin_ia32_vfmaddsubpd256 (v4df, v4df, v4df) -v8sf __builtin_ia32_vfmaddsubps256 (v8sf, v8sf, v8sf) -v4df __builtin_ia32_vfmsubaddpd256 (v4df, v4df, v4df) -v8sf __builtin_ia32_vfmsubaddps256 (v8sf, v8sf, v8sf) +v2df __builtin_ia32_vfmaddpd (v2df, v2df, v2df); +v4sf __builtin_ia32_vfmaddps (v4sf, v4sf, v4sf); +v2df __builtin_ia32_vfmaddsd (v2df, v2df, v2df); +v4sf __builtin_ia32_vfmaddss (v4sf, v4sf, v4sf); +v2df __builtin_ia32_vfmsubpd (v2df, v2df, v2df); +v4sf __builtin_ia32_vfmsubps (v4sf, v4sf, v4sf); +v2df __builtin_ia32_vfmsubsd (v2df, v2df, v2df); +v4sf __builtin_ia32_vfmsubss (v4sf, v4sf, v4sf); +v2df __builtin_ia32_vfnmaddpd (v2df, v2df, v2df); +v4sf __builtin_ia32_vfnmaddps (v4sf, v4sf, v4sf); +v2df __builtin_ia32_vfnmaddsd (v2df, v2df, v2df); +v4sf __builtin_ia32_vfnmaddss (v4sf, v4sf, v4sf); +v2df __builtin_ia32_vfnmsubpd (v2df, v2df, v2df); +v4sf __builtin_ia32_vfnmsubps (v4sf, v4sf, v4sf); +v2df __builtin_ia32_vfnmsubsd (v2df, v2df, v2df); +v4sf __builtin_ia32_vfnmsubss (v4sf, v4sf, v4sf); +v2df __builtin_ia32_vfmaddsubpd (v2df, v2df, v2df); +v4sf __builtin_ia32_vfmaddsubps (v4sf, v4sf, v4sf); +v2df __builtin_ia32_vfmsubaddpd (v2df, v2df, v2df); +v4sf __builtin_ia32_vfmsubaddps (v4sf, v4sf, v4sf); +v4df __builtin_ia32_vfmaddpd256 (v4df, v4df, v4df); +v8sf __builtin_ia32_vfmaddps256 
(v8sf, v8sf, v8sf); +v4df __builtin_ia32_vfmsubpd256 (v4df, v4df, v4df); +v8sf __builtin_ia32_vfmsubps256 (v8sf, v8sf, v8sf); +v4df __builtin_ia32_vfnmaddpd256 (v4df, v4df, v4df); +v8sf __builtin_ia32_vfnmaddps256 (v8sf, v8sf, v8sf); +v4df __builtin_ia32_vfnmsubpd256 (v4df, v4df, v4df); +v8sf __builtin_ia32_vfnmsubps256 (v8sf, v8sf, v8sf); +v4df __builtin_ia32_vfmaddsubpd256 (v4df, v4df, v4df); +v8sf __builtin_ia32_vfmaddsubps256 (v8sf, v8sf, v8sf); +v4df __builtin_ia32_vfmsubaddpd256 (v4df, v4df, v4df); +v8sf __builtin_ia32_vfmsubaddps256 (v8sf, v8sf, v8sf); @end smallexample @@ -22988,12 +22981,12 @@ void __builtin_ia32_llwpcb64 (void *); void * __builtin_ia32_llwpcb16 (void); void * __builtin_ia32_llwpcb32 (void); void * __builtin_ia32_llwpcb64 (void); -void __builtin_ia32_lwpval16 (unsigned short, unsigned int, unsigned short) -void __builtin_ia32_lwpval32 (unsigned int, unsigned int, unsigned int) -void __builtin_ia32_lwpval64 (unsigned __int64, unsigned int, unsigned int) -unsigned char __builtin_ia32_lwpins16 (unsigned short, unsigned int, unsigned short) -unsigned char __builtin_ia32_lwpins32 (unsigned int, unsigned int, unsigned int) -unsigned char __builtin_ia32_lwpins64 (unsigned __int64, unsigned int, unsigned int) +void __builtin_ia32_lwpval16 (unsigned short, unsigned int, unsigned short); +void __builtin_ia32_lwpval32 (unsigned int, unsigned int, unsigned int); +void __builtin_ia32_lwpval64 (unsigned __int64, unsigned int, unsigned int); +unsigned char __builtin_ia32_lwpins16 (unsigned short, unsigned int, unsigned short); +unsigned char __builtin_ia32_lwpins32 (unsigned int, unsigned int, unsigned int); +unsigned char __builtin_ia32_lwpins64 (unsigned __int64, unsigned int, unsigned int); @end smallexample The following built-in functions are available when @option{-mbmi} is used. @@ -23006,12 +22999,12 @@ unsigned long long __builtin_ia32_bextr_u64 (unsigned long long, unsigned long l The following built-in functions are available when @option{-mbmi2} is used. All of them generate the machine instruction that is part of the name. @smallexample -unsigned int _bzhi_u32 (unsigned int, unsigned int) -unsigned int _pdep_u32 (unsigned int, unsigned int) -unsigned int _pext_u32 (unsigned int, unsigned int) -unsigned long long _bzhi_u64 (unsigned long long, unsigned long long) -unsigned long long _pdep_u64 (unsigned long long, unsigned long long) -unsigned long long _pext_u64 (unsigned long long, unsigned long long) +unsigned int _bzhi_u32 (unsigned int, unsigned int); +unsigned int _pdep_u32 (unsigned int, unsigned int); +unsigned int _pext_u32 (unsigned int, unsigned int); +unsigned long long _bzhi_u64 (unsigned long long, unsigned long long); +unsigned long long _pdep_u64 (unsigned long long, unsigned long long); +unsigned long long _pext_u64 (unsigned long long, unsigned long long); @end smallexample The following built-in functions are available when @option{-mlzcnt} is used. @@ -23025,26 +23018,26 @@ unsigned long long __builtin_ia32_lzcnt_u64 (unsigned long long); The following built-in functions are available when @option{-mfxsr} is used. All of them generate the machine instruction that is part of the name. 
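To make the BMI2 bit-manipulation entries above more concrete (a minimal sketch, not part of this patch): @code{_pdep_u32} deposits the low bits of its first argument into the bit positions selected by the mask, and @code{_pext_u32} performs the inverse extraction; both are declared in @code{<x86intrin.h>}.

/* Minimal sketch: interleave the low 16 bits of LO and HI into even and odd
   bit positions using parallel bit deposit.  Compile with -mbmi2.  */
#include <x86intrin.h>

unsigned int
interleave16 (unsigned int lo, unsigned int hi)
{
  return _pdep_u32 (lo, 0x55555555u) | _pdep_u32 (hi, 0xaaaaaaaau);
}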
@smallexample -void __builtin_ia32_fxsave (void *) -void __builtin_ia32_fxrstor (void *) -void __builtin_ia32_fxsave64 (void *) -void __builtin_ia32_fxrstor64 (void *) +void __builtin_ia32_fxsave (void *); +void __builtin_ia32_fxrstor (void *); +void __builtin_ia32_fxsave64 (void *); +void __builtin_ia32_fxrstor64 (void *); @end smallexample The following built-in functions are available when @option{-mxsave} is used. All of them generate the machine instruction that is part of the name. @smallexample -void __builtin_ia32_xsave (void *, long long) -void __builtin_ia32_xrstor (void *, long long) -void __builtin_ia32_xsave64 (void *, long long) -void __builtin_ia32_xrstor64 (void *, long long) +void __builtin_ia32_xsave (void *, long long); +void __builtin_ia32_xrstor (void *, long long); +void __builtin_ia32_xsave64 (void *, long long); +void __builtin_ia32_xrstor64 (void *, long long); @end smallexample The following built-in functions are available when @option{-mxsaveopt} is used. All of them generate the machine instruction that is part of the name. @smallexample -void __builtin_ia32_xsaveopt (void *, long long) -void __builtin_ia32_xsaveopt64 (void *, long long) +void __builtin_ia32_xsaveopt (void *, long long); +void __builtin_ia32_xsaveopt64 (void *, long long); @end smallexample The following built-in functions are available when @option{-mtbm} is used. @@ -23061,37 +23054,37 @@ The following built-in functions are available when @option{-m3dnow} is used. All of them generate the machine instruction that is part of the name. @smallexample -void __builtin_ia32_femms (void) -v8qi __builtin_ia32_pavgusb (v8qi, v8qi) -v2si __builtin_ia32_pf2id (v2sf) -v2sf __builtin_ia32_pfacc (v2sf, v2sf) -v2sf __builtin_ia32_pfadd (v2sf, v2sf) -v2si __builtin_ia32_pfcmpeq (v2sf, v2sf) -v2si __builtin_ia32_pfcmpge (v2sf, v2sf) -v2si __builtin_ia32_pfcmpgt (v2sf, v2sf) -v2sf __builtin_ia32_pfmax (v2sf, v2sf) -v2sf __builtin_ia32_pfmin (v2sf, v2sf) -v2sf __builtin_ia32_pfmul (v2sf, v2sf) -v2sf __builtin_ia32_pfrcp (v2sf) -v2sf __builtin_ia32_pfrcpit1 (v2sf, v2sf) -v2sf __builtin_ia32_pfrcpit2 (v2sf, v2sf) -v2sf __builtin_ia32_pfrsqrt (v2sf) -v2sf __builtin_ia32_pfsub (v2sf, v2sf) -v2sf __builtin_ia32_pfsubr (v2sf, v2sf) -v2sf __builtin_ia32_pi2fd (v2si) -v4hi __builtin_ia32_pmulhrw (v4hi, v4hi) +void __builtin_ia32_femms (void); +v8qi __builtin_ia32_pavgusb (v8qi, v8qi); +v2si __builtin_ia32_pf2id (v2sf); +v2sf __builtin_ia32_pfacc (v2sf, v2sf); +v2sf __builtin_ia32_pfadd (v2sf, v2sf); +v2si __builtin_ia32_pfcmpeq (v2sf, v2sf); +v2si __builtin_ia32_pfcmpge (v2sf, v2sf); +v2si __builtin_ia32_pfcmpgt (v2sf, v2sf); +v2sf __builtin_ia32_pfmax (v2sf, v2sf); +v2sf __builtin_ia32_pfmin (v2sf, v2sf); +v2sf __builtin_ia32_pfmul (v2sf, v2sf); +v2sf __builtin_ia32_pfrcp (v2sf); +v2sf __builtin_ia32_pfrcpit1 (v2sf, v2sf); +v2sf __builtin_ia32_pfrcpit2 (v2sf, v2sf); +v2sf __builtin_ia32_pfrsqrt (v2sf); +v2sf __builtin_ia32_pfsub (v2sf, v2sf); +v2sf __builtin_ia32_pfsubr (v2sf, v2sf); +v2sf __builtin_ia32_pi2fd (v2si); +v4hi __builtin_ia32_pmulhrw (v4hi, v4hi); @end smallexample The following built-in functions are available when @option{-m3dnowa} is used. All of them generate the machine instruction that is part of the name. 
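One practical detail behind the @option{-mfxsr} built-ins shown above (a minimal sketch, not part of this patch): @code{__builtin_ia32_fxsave} and @code{__builtin_ia32_fxrstor} operate on the 512-byte FXSAVE area, which must be 16-byte aligned.

/* Minimal sketch: save and later restore the x87/SSE state.
   Compile with -mfxsr.  */
static unsigned char fxarea[512] __attribute__ ((aligned (16)));

void
checkpoint_fpu (void)
{
  __builtin_ia32_fxsave (fxarea);
}

void
restore_fpu (void)
{
  __builtin_ia32_fxrstor (fxarea);
}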
@smallexample -v2si __builtin_ia32_pf2iw (v2sf) -v2sf __builtin_ia32_pfnacc (v2sf, v2sf) -v2sf __builtin_ia32_pfpnacc (v2sf, v2sf) -v2sf __builtin_ia32_pi2fw (v2si) -v2sf __builtin_ia32_pswapdsf (v2sf) -v2si __builtin_ia32_pswapdsi (v2si) +v2si __builtin_ia32_pf2iw (v2sf); +v2sf __builtin_ia32_pfnacc (v2sf, v2sf); +v2sf __builtin_ia32_pfpnacc (v2sf, v2sf); +v2sf __builtin_ia32_pi2fw (v2si); +v2sf __builtin_ia32_pswapdsf (v2sf); +v2si __builtin_ia32_pswapdsi (v2si); @end smallexample The following built-in functions are available when @option{-mrtm} is used @@ -23100,30 +23093,30 @@ low level functions. Normally the functions in @ref{x86 transactional memory intrinsics} should be used instead. @smallexample -int __builtin_ia32_xbegin () -void __builtin_ia32_xend () -void __builtin_ia32_xabort (status) -int __builtin_ia32_xtest () +int __builtin_ia32_xbegin (); +void __builtin_ia32_xend (); +void __builtin_ia32_xabort (status); +int __builtin_ia32_xtest (); @end smallexample The following built-in functions are available when @option{-mmwaitx} is used. All of them generate the machine instruction that is part of the name. @smallexample -void __builtin_ia32_monitorx (void *, unsigned int, unsigned int) -void __builtin_ia32_mwaitx (unsigned int, unsigned int, unsigned int) +void __builtin_ia32_monitorx (void *, unsigned int, unsigned int); +void __builtin_ia32_mwaitx (unsigned int, unsigned int, unsigned int); @end smallexample The following built-in functions are available when @option{-mclzero} is used. All of them generate the machine instruction that is part of the name. @smallexample -void __builtin_i32_clzero (void *) +void __builtin_i32_clzero (void *); @end smallexample The following built-in functions are available when @option{-mpku} is used. They generate reads and writes to PKRU. @smallexample -void __builtin_ia32_wrpkru (unsigned int) -unsigned int __builtin_ia32_rdpkru () +void __builtin_ia32_wrpkru (unsigned int); +unsigned int __builtin_ia32_rdpkru (); @end smallexample The following built-in functions are available when @@ -23135,10 +23128,10 @@ Normally the functions in @ref{x86 control-flow protection intrinsics} should be used instead. @smallexample -unsigned int __builtin_ia32_rdsspd (void) -unsigned long long __builtin_ia32_rdsspq (void) -void __builtin_ia32_incsspd (unsigned int) -void __builtin_ia32_incsspq (unsigned long long) +unsigned int __builtin_ia32_rdsspd (void); +unsigned long long __builtin_ia32_rdsspq (void); +void __builtin_ia32_incsspd (unsigned int); +void __builtin_ia32_incsspq (unsigned long long); void __builtin_ia32_saveprevssp(void); void __builtin_ia32_rstorssp(void *); void __builtin_ia32_wrssd(unsigned int, void *); diff --git a/gcc/jit/ChangeLog b/gcc/jit/ChangeLog index 0b44fc1..9f41007 100644 --- a/gcc/jit/ChangeLog +++ b/gcc/jit/ChangeLog @@ -1,3 +1,11 @@ +2021-12-20 Martin Liska <mliska@suse.cz> + + * libgccjit.c (struct version_info): Rename to jit_version_info. + (struct jit_version_info): Likewise. + (gcc_jit_version_major): Likewise. + (gcc_jit_version_minor): Likewise. + (gcc_jit_version_patchlevel): Likewise. + 2021-12-14 Petter Tomner <tomner@kth.se> * jit-common.h: New enum diff --git a/gcc/jit/libgccjit.c b/gcc/jit/libgccjit.c index 5cb27a2..3d2d838 100644 --- a/gcc/jit/libgccjit.c +++ b/gcc/jit/libgccjit.c @@ -3954,11 +3954,11 @@ gcc_jit_context_new_rvalue_from_vector (gcc_jit_context *ctxt, static pthread_mutex_t version_mutex = PTHREAD_MUTEX_INITIALIZER; -struct version_info +struct jit_version_info { /* Default constructor. 
Populate via parse_basever, guarded by version_mutex. */ - version_info () + jit_version_info () { pthread_mutex_lock (&version_mutex); parse_basever (&major, &minor, &patchlevel); @@ -3974,21 +3974,21 @@ struct version_info extern int gcc_jit_version_major (void) { - version_info vi; + jit_version_info vi; return vi.major; } extern int gcc_jit_version_minor (void) { - version_info vi; + jit_version_info vi; return vi.minor; } extern int gcc_jit_version_patchlevel (void) { - version_info vi; + jit_version_info vi; return vi.patchlevel; } @@ -723,7 +723,7 @@ default_options_optimization (struct gcc_options *opts, const int optimize_val = integral_argument (opt->arg); if (optimize_val == -1) error_at (loc, "argument to %<-O%> should be a non-negative " - "integer, %<g%>, %<s%> or %<fast%>"); + "integer, %<g%>, %<s%>, %<z%> or %<fast%>"); else { opts->x_optimize = optimize_val; diff --git a/gcc/predict.c b/gcc/predict.c index 1a1da7e..1316ca8 100644 --- a/gcc/predict.c +++ b/gcc/predict.c @@ -1859,7 +1859,7 @@ predict_iv_comparison (class loop *loop, basic_block bb, exits to predict them using PRED_LOOP_EXTRA_EXIT. */ static void -predict_extra_loop_exits (edge exit_edge) +predict_extra_loop_exits (class loop *loop, edge exit_edge) { unsigned i; bool check_value_one; @@ -1912,12 +1912,14 @@ predict_extra_loop_exits (edge exit_edge) continue; if (EDGE_COUNT (e->src->succs) != 1) { - predict_paths_leading_to_edge (e, PRED_LOOP_EXTRA_EXIT, NOT_TAKEN); + predict_paths_leading_to_edge (e, PRED_LOOP_EXTRA_EXIT, NOT_TAKEN, + loop); continue; } FOR_EACH_EDGE (e1, ei, e->src->preds) - predict_paths_leading_to_edge (e1, PRED_LOOP_EXTRA_EXIT, NOT_TAKEN); + predict_paths_leading_to_edge (e1, PRED_LOOP_EXTRA_EXIT, NOT_TAKEN, + loop); } } @@ -2008,7 +2010,7 @@ predict_loops (void) ex->src->index, ex->dest->index); continue; } - predict_extra_loop_exits (ex); + predict_extra_loop_exits (loop, ex); if (number_of_iterations_exit (loop, ex, &niter_desc, false, false)) niter = niter_desc.niter; diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog index 87e4a2a..a9529ab 100644 --- a/gcc/testsuite/ChangeLog +++ b/gcc/testsuite/ChangeLog @@ -1,3 +1,59 @@ +2021-12-21 Jiang Haochen <haochen.jiang@intel.com> + + * gcc.target/i386/bmi-1.c: Add test for new intrinsic. + * gcc.target/i386/bmi-2.c: Ditto. + * gcc.target/i386/bmi-3.c: Ditto. + +2021-12-21 Xionghu Luo <luoxhu@linux.ibm.com> + + * gcc.dg/tree-ssa/recip-3.c: Adjust. + * gcc.dg/tree-ssa/ssa-lim-19.c: New test. + * gcc.dg/tree-ssa/ssa-lim-20.c: New test. + * gcc.dg/tree-ssa/ssa-lim-21.c: New test. + * gcc.dg/tree-ssa/ssa-lim-22.c: New test. + * gcc.dg/tree-ssa/ssa-lim-23.c: New test. + +2021-12-21 Xionghu Luo <luoxhu@linux.ibm.com> + + PR middle-end/103270 + * gcc.dg/pr103270.c: New test. + +2021-12-20 Patrick Palka <ppalka@redhat.com> + + * g++.dg/lookup/memfn1.C: New test. + * g++.dg/template/non-dependent16b.C: New test. + +2021-12-20 Olivier Hainque <hainque@adacore.com> + + * gcc.dg/vect/vect-simd-20.c: Fix size of p[] + to accommodate the number of strides performed + by foo() for s == 78. + +2021-12-20 Roger Sayle <roger@nextmovesoftware.com> + Uroš Bizjak <ubizjak@gmail.com> + + * gcc.target/i386/smuldi3_highpart.c: New test case. + +2021-12-20 Patrick Palka <ppalka@redhat.com> + + * g++.dg/cpp0x/error2.C: Make the call to foo type-dependent in + order to avoid latent pretty-printing issue for FUNCTION_DECL + inside MODOP_EXPR. + * g++.dg/cpp0x/fntmp-equiv1.C: Make the calls to d, d2 and d3 + within the function signatures dependent. 
+ * g++.dg/template/non-dependent16.C: New test. + * g++.dg/template/non-dependent16a.C: New test. + * g++.dg/template/non-dependent17.C: New test. + +2021-12-20 Jan Hubicka <hubicka@ucw.cz> + + PR ipa/103669 + * g++.dg/torture/pr103669.C: New test. + +2021-12-20 liuhongt <hongtao.liu@intel.com> + + * gcc.target/i386/pr98468.c: New test. + 2021-12-19 Andrew Pinski <apinski@marvell.com> * gcc.dg/uninit-pr89230-1.c: Change the dg-bogus messages diff --git a/gcc/testsuite/g++.dg/cpp0x/error2.C b/gcc/testsuite/g++.dg/cpp0x/error2.C index e6af294..eb96636 100644 --- a/gcc/testsuite/g++.dg/cpp0x/error2.C +++ b/gcc/testsuite/g++.dg/cpp0x/error2.C @@ -3,7 +3,7 @@ template<int> int foo(); -template<typename F> void bar(F f) +template<typename F, int N> void bar(F f) { - f((foo<0>()=0)...); // { dg-error "pattern '\\(foo\\<0\\>\\)\\(\\)=0'" } + f((foo<N>()=0)...); // { dg-error "pattern '\\(foo\\<N\\>\\)\\(\\)=0'" } } diff --git a/gcc/testsuite/g++.dg/cpp0x/fntmp-equiv1.C b/gcc/testsuite/g++.dg/cpp0x/fntmp-equiv1.C index 833ae6f..c7d7d60 100644 --- a/gcc/testsuite/g++.dg/cpp0x/fntmp-equiv1.C +++ b/gcc/testsuite/g++.dg/cpp0x/fntmp-equiv1.C @@ -3,21 +3,21 @@ int d(int, int); template <long> class e {}; -template <unsigned long f, unsigned b, typename> e<sizeof(d(f, b))> d(); -template <unsigned long f, unsigned b, typename> e<d(f, b)> d(); +template <class T> e<sizeof(d(T{}, T{}))> d(...); +template <class T> e<d(T{}, T{})> d(...); template <class T, class U> constexpr T d2(T, U) { return 42; } -template <unsigned long f, unsigned b, typename> e<d2(f, b)> d2(); -template <unsigned long f, unsigned b, typename> e<d2(f, b)> d2(); +template <class T> e<d2(T{}, T{})> d2(...); +template <class T> e<d2(T{}, T{})> d2(...); template <typename a, typename c> a d3(a, c); -template <unsigned long f, unsigned b, typename> e<sizeof(d3(f, b))> d3(); -template <unsigned long f, unsigned b, typename> e<sizeof(d3(f, b))> d3(); +template <class T> e<sizeof(d3(T{}, T{}))> d3(...); +template <class T> e<sizeof(d3(T{}, T{}))> d3(...); int main() { - d<1,2,int>(); - d2<1,2,int>(); - d3<1,2,int>(); + d<int>(); + d2<int>(); + d3<int>(); } diff --git a/gcc/testsuite/g++.dg/lookup/memfn1.C b/gcc/testsuite/g++.dg/lookup/memfn1.C new file mode 100644 index 0000000..8f8e5d9 --- /dev/null +++ b/gcc/testsuite/g++.dg/lookup/memfn1.C @@ -0,0 +1,16 @@ +// Verify we preserve the consistency of member function lookup outside of a +// complete-class context. +// { dg-do compile { target c++11 } } + +template<class...> +struct A { + template<class T> static void f(); // #1 + template<class T> static auto g() -> decltype(f<T>()); + template<class T> static void f(...); // #2 +}; + +int main() { + A<>::g<int>(); // OK, the later-declared #2 isn't considered when + // instantiating f<T>(), which would have otherwise + // led to ambiguity. +} diff --git a/gcc/testsuite/g++.dg/template/non-dependent16.C b/gcc/testsuite/g++.dg/template/non-dependent16.C new file mode 100644 index 0000000..ee8ef90 --- /dev/null +++ b/gcc/testsuite/g++.dg/template/non-dependent16.C @@ -0,0 +1,37 @@ +// This test verifies that after resolving a non-dependent call expression +// ahead of time, we prune all but the selected candidate from the overload +// set. Without this optimization, overload resolution for the final call to +// f<void>() would be exponential in the size of the overload set. 
+ +// { dg-do compile { target c++11 } } + +template<class T> void f(); +template<class T> auto f() -> decltype(f<void>(), 1, *T()); +template<class T> auto f() -> decltype(f<void>(), 2, *T()); +template<class T> auto f() -> decltype(f<void>(), 3, *T()); +template<class T> auto f() -> decltype(f<void>(), 4, *T()); +template<class T> auto f() -> decltype(f<void>(), 5, *T()); +template<class T> auto f() -> decltype(f<void>(), 6, *T()); +template<class T> auto f() -> decltype(f<void>(), 7, *T()); +template<class T> auto f() -> decltype(f<void>(), 8, *T()); +template<class T> auto f() -> decltype(f<void>(), 9, *T()); +template<class T> auto f() -> decltype(f<void>(), 10, *T()); +template<class T> auto f() -> decltype(f<void>(), 11, *T()); +template<class T> auto f() -> decltype(f<void>(), 12, *T()); +template<class T> auto f() -> decltype(f<void>(), 13, *T()); +template<class T> auto f() -> decltype(f<void>(), 14, *T()); +template<class T> auto f() -> decltype(f<void>(), 15, *T()); +template<class T> auto f() -> decltype(f<void>(), 16, *T()); +template<class T> auto f() -> decltype(f<void>(), 17, *T()); +template<class T> auto f() -> decltype(f<void>(), 18, *T()); +template<class T> auto f() -> decltype(f<void>(), 19, *T()); +template<class T> auto f() -> decltype(f<void>(), 20, *T()); +template<class T> auto f() -> decltype(f<void>(), 21, *T()); +template<class T> auto f() -> decltype(f<void>(), 22, *T()); +template<class T> auto f() -> decltype(f<void>(), 23, *T()); +template<class T> auto f() -> decltype(f<void>(), 24, *T()); +template<class T> auto f() -> decltype(f<void>(), 25, *T()); + +int main() { + f<void>(); +} diff --git a/gcc/testsuite/g++.dg/template/non-dependent16a.C b/gcc/testsuite/g++.dg/template/non-dependent16a.C new file mode 100644 index 0000000..0e04d64 --- /dev/null +++ b/gcc/testsuite/g++.dg/template/non-dependent16a.C @@ -0,0 +1,36 @@ +// Like non-dependent16.C, but using member functions. 
+ +// { dg-do compile { target c++11 } } + +struct A { + template<class T> static void f(); + template<class T> static auto f() -> decltype(f<void>(), 1, *T()); + template<class T> static auto f() -> decltype(f<void>(), 2, *T()); + template<class T> static auto f() -> decltype(f<void>(), 3, *T()); + template<class T> static auto f() -> decltype(f<void>(), 4, *T()); + template<class T> static auto f() -> decltype(f<void>(), 5, *T()); + template<class T> static auto f() -> decltype(f<void>(), 6, *T()); + template<class T> static auto f() -> decltype(f<void>(), 7, *T()); + template<class T> static auto f() -> decltype(f<void>(), 8, *T()); + template<class T> static auto f() -> decltype(f<void>(), 9, *T()); + template<class T> static auto f() -> decltype(f<void>(), 10, *T()); + template<class T> static auto f() -> decltype(f<void>(), 11, *T()); + template<class T> static auto f() -> decltype(f<void>(), 12, *T()); + template<class T> static auto f() -> decltype(f<void>(), 13, *T()); + template<class T> static auto f() -> decltype(f<void>(), 14, *T()); + template<class T> static auto f() -> decltype(f<void>(), 15, *T()); + template<class T> static auto f() -> decltype(f<void>(), 16, *T()); + template<class T> static auto f() -> decltype(f<void>(), 17, *T()); + template<class T> static auto f() -> decltype(f<void>(), 18, *T()); + template<class T> static auto f() -> decltype(f<void>(), 19, *T()); + template<class T> static auto f() -> decltype(f<void>(), 20, *T()); + template<class T> static auto f() -> decltype(f<void>(), 21, *T()); + template<class T> static auto f() -> decltype(f<void>(), 22, *T()); + template<class T> static auto f() -> decltype(f<void>(), 23, *T()); + template<class T> static auto f() -> decltype(f<void>(), 24, *T()); + template<class T> static auto f() -> decltype(f<void>(), 25, *T()); +}; + +int main() { + A::f<void>(); +} diff --git a/gcc/testsuite/g++.dg/template/non-dependent16b.C b/gcc/testsuite/g++.dg/template/non-dependent16b.C new file mode 100644 index 0000000..b0d1bbe --- /dev/null +++ b/gcc/testsuite/g++.dg/template/non-dependent16b.C @@ -0,0 +1,37 @@ +// Like non-dependent16a.C, but where A is a template. 
+ +// { dg-do compile { target c++11 } } + +template<class...> +struct A { + template<class T> static void f(); + template<class T> static auto f() -> decltype(f<void>(), 1, *T()); + template<class T> static auto f() -> decltype(f<void>(), 2, *T()); + template<class T> static auto f() -> decltype(f<void>(), 3, *T()); + template<class T> static auto f() -> decltype(f<void>(), 4, *T()); + template<class T> static auto f() -> decltype(f<void>(), 5, *T()); + template<class T> static auto f() -> decltype(f<void>(), 6, *T()); + template<class T> static auto f() -> decltype(f<void>(), 7, *T()); + template<class T> static auto f() -> decltype(f<void>(), 8, *T()); + template<class T> static auto f() -> decltype(f<void>(), 9, *T()); + template<class T> static auto f() -> decltype(f<void>(), 10, *T()); + template<class T> static auto f() -> decltype(f<void>(), 11, *T()); + template<class T> static auto f() -> decltype(f<void>(), 12, *T()); + template<class T> static auto f() -> decltype(f<void>(), 13, *T()); + template<class T> static auto f() -> decltype(f<void>(), 14, *T()); + template<class T> static auto f() -> decltype(f<void>(), 15, *T()); + template<class T> static auto f() -> decltype(f<void>(), 16, *T()); + template<class T> static auto f() -> decltype(f<void>(), 17, *T()); + template<class T> static auto f() -> decltype(f<void>(), 18, *T()); + template<class T> static auto f() -> decltype(f<void>(), 19, *T()); + template<class T> static auto f() -> decltype(f<void>(), 20, *T()); + template<class T> static auto f() -> decltype(f<void>(), 21, *T()); + template<class T> static auto f() -> decltype(f<void>(), 22, *T()); + template<class T> static auto f() -> decltype(f<void>(), 23, *T()); + template<class T> static auto f() -> decltype(f<void>(), 24, *T()); + template<class T> static auto f() -> decltype(f<void>(), 25, *T()); +}; + +int main() { + A<>::f<void>(); +} diff --git a/gcc/testsuite/g++.dg/template/non-dependent17.C b/gcc/testsuite/g++.dg/template/non-dependent17.C new file mode 100644 index 0000000..6b62dd2a --- /dev/null +++ b/gcc/testsuite/g++.dg/template/non-dependent17.C @@ -0,0 +1,21 @@ +// A variant of deduce4.C with multiple overloads of foo. Verify we don't +// crash after ahead-of-time pruning of the overload set for the non-dependent +// call to foo. +// { dg-do compile } + +template <typename T> +struct S { + template <typename U, typename V> + static void foo(V) { } + template <typename U> + static void foo(...) 
{ } + + void bar () { foo<int>(10); } +}; + +void +test () +{ + S<int> s; + s.bar (); +} diff --git a/gcc/testsuite/gcc.dg/pr103270.c b/gcc/testsuite/gcc.dg/pr103270.c new file mode 100644 index 0000000..819310e --- /dev/null +++ b/gcc/testsuite/gcc.dg/pr103270.c @@ -0,0 +1,19 @@ +/* { dg-do compile } */ +/* { dg-options "-O2 -fdump-tree-profile_estimate" } */ + +void test(int a, int* i) +{ + for (; a < 5; ++a) + { + int b = 0; + int c = 0; + for (; b != -11; b--) + for (int d = 0; d ==0; d++) + { + *i += c & a; + c = b; + } + } +} + +/* { dg-final { scan-tree-dump-not "extra loop exit heuristics of edge\[^:\]*:" "profile_estimate"} } */ diff --git a/gcc/testsuite/gcc.dg/tree-ssa/recip-3.c b/gcc/testsuite/gcc.dg/tree-ssa/recip-3.c index 638bf38..641c91e 100644 --- a/gcc/testsuite/gcc.dg/tree-ssa/recip-3.c +++ b/gcc/testsuite/gcc.dg/tree-ssa/recip-3.c @@ -23,4 +23,4 @@ float h () F[0] += E / d; } -/* { dg-final { scan-tree-dump-times " / " 1 "recip" } } */ +/* { dg-final { scan-tree-dump-times " / " 5 "recip" } } */ diff --git a/gcc/testsuite/gcc.dg/tree-ssa/ssa-lim-19.c b/gcc/testsuite/gcc.dg/tree-ssa/ssa-lim-19.c new file mode 100644 index 0000000..51c1913 --- /dev/null +++ b/gcc/testsuite/gcc.dg/tree-ssa/ssa-lim-19.c @@ -0,0 +1,29 @@ +/* { dg-do compile } */ +/* { dg-options "-O2 -fdump-tree-lim2-details" } */ + +volatile int x; +void +bar (int, char *, char *); +void +foo (int *a, int n, int m, int s, int t) +{ + int i; + int j; + int k; + + for (i = 0; i < m; i++) // Loop 1 + { + if (__builtin_expect (x, 0)) + for (j = 0; j < n; j++) // Loop 2 + for (k = 0; k < n; k++) // Loop 3 + { + bar (s / 5, "one", "two"); + a[t] = s; + } + a[t] = t; + } +} + +/* { dg-final { scan-tree-dump-times "out of loop 2" 4 "lim2" } } */ +/* { dg-final { scan-tree-dump-times "out of loop 1" 3 "lim2" } } */ + diff --git a/gcc/testsuite/gcc.dg/tree-ssa/ssa-lim-20.c b/gcc/testsuite/gcc.dg/tree-ssa/ssa-lim-20.c new file mode 100644 index 0000000..bc60a04 --- /dev/null +++ b/gcc/testsuite/gcc.dg/tree-ssa/ssa-lim-20.c @@ -0,0 +1,25 @@ +/* { dg-do compile } */ +/* { dg-options "-O2 -fdump-tree-lim2-details" } */ + +/* Test that `count' is not hoisted out of loop when bb is cold. */ + +int count; +volatile int x; + +struct obj { + int data; + struct obj *next; + +} *q; + +void +func (int m) +{ + struct obj *p; + for (int i = 0; i < m; i++) + if (__builtin_expect (x, 0)) + count++; + +} + +/* { dg-final { scan-tree-dump-not "Executing store motion of" "lim2" } } */ diff --git a/gcc/testsuite/gcc.dg/tree-ssa/ssa-lim-21.c b/gcc/testsuite/gcc.dg/tree-ssa/ssa-lim-21.c new file mode 100644 index 0000000..ffe6f8f --- /dev/null +++ b/gcc/testsuite/gcc.dg/tree-ssa/ssa-lim-21.c @@ -0,0 +1,35 @@ +/* { dg-do compile } */ +/* { dg-options "-O2 -fdump-tree-lim2-details" } */ + +/* Test that `data' and 'data1' is not hoisted out of inner loop and outer loop + when it is in cold loop. */ + +int count; +volatile int x; + +struct obj { + int data; + int data1; + struct obj *next; +}; + +void +func (int m, int n, int k, struct obj *a) +{ + struct obj *q = a; + for (int j = 0; j < m; j++) + if (__builtin_expect (m, 0)) + for (int i = 0; i < m; i++) + { + if (__builtin_expect (x, 0)) + { + count++; + q->data += 3; /* Not hoisted out to inner loop. */ + } + count += n; + q->data1 += k; /* Not hoisted out to outer loop. 
*/ + } +} + +/* { dg-final { scan-tree-dump-not "Executing store motion of" "lim2" } } */ + diff --git a/gcc/testsuite/gcc.dg/tree-ssa/ssa-lim-22.c b/gcc/testsuite/gcc.dg/tree-ssa/ssa-lim-22.c new file mode 100644 index 0000000..16ba4ce --- /dev/null +++ b/gcc/testsuite/gcc.dg/tree-ssa/ssa-lim-22.c @@ -0,0 +1,32 @@ +/* { dg-do compile } */ +/* { dg-options "-O2 -fdump-tree-lim2-details" } */ + +volatile int x; +volatile int y; +void +bar (int, char *, char *); +void +foo (int *a, int n, int m, int s, int t) +{ + int i; + int j; + int k; + + for (i = 0; i < m; i++) // Loop 1 + { + if (__builtin_expect (x, 0)) + for (j = 0; j < n; j++) // Loop 2 + if (__builtin_expect (y, 0)) + for (k = 0; k < n; k++) // Loop 3 + { + bar (s / 5, "one", "two"); + a[t] = s; + } + a[t] = t; + } +} + +/* { dg-final { scan-tree-dump-times "out of loop 3" 4 "lim2" } } */ +/* { dg-final { scan-tree-dump-times "out of loop 1" 3 "lim2" } } */ + + diff --git a/gcc/testsuite/gcc.dg/tree-ssa/ssa-lim-23.c b/gcc/testsuite/gcc.dg/tree-ssa/ssa-lim-23.c new file mode 100644 index 0000000..e788074 --- /dev/null +++ b/gcc/testsuite/gcc.dg/tree-ssa/ssa-lim-23.c @@ -0,0 +1,21 @@ +/* { dg-do compile } */ +/* { dg-options "-O2 -fdump-tree-lim2-details" } */ + +volatile int x; +void +bar (int, char *, char *); +void +foo (int *a, int n, int k) +{ + int i; + + for (i = 0; i < n; i++) + { + if (__builtin_expect (x, 0)) + bar (k / 5, "one", "two"); + a[i] = k; + } +} + +/* { dg-final { scan-tree-dump-not "out of loop 1" "lim2" } } */ + diff --git a/gcc/testsuite/gcc.dg/vect/vect-simd-20.c b/gcc/testsuite/gcc.dg/vect/vect-simd-20.c index c85f05f..57217c8 100644 --- a/gcc/testsuite/gcc.dg/vect/vect-simd-20.c +++ b/gcc/testsuite/gcc.dg/vect/vect-simd-20.c @@ -18,7 +18,7 @@ foo (int s, int m, int n, int *p) return r; } -int p[10000 / 78 * 7]; +int p[((10000 / 78) + 1) * 7]; int main () diff --git a/gcc/testsuite/gcc.target/i386/bmi-1.c b/gcc/testsuite/gcc.target/i386/bmi-1.c index 738705e..141adaa 100644 --- a/gcc/testsuite/gcc.target/i386/bmi-1.c +++ b/gcc/testsuite/gcc.target/i386/bmi-1.c @@ -1,6 +1,6 @@ /* { dg-do compile } */ /* { dg-options "-O2 -fno-ipa-icf -mbmi " } */ -/* { dg-final { scan-assembler "andn\[^\\n]*eax" } } */ +/* { dg-final { scan-assembler-times "andn\[^\\n]*eax" 2 } } */ /* { dg-final { scan-assembler-times "bextr\[ \\t]+\[^\\n]*eax" 2 } } */ /* { dg-final { scan-assembler-times "blsi\[^\\n]*eax" 2 } } */ /* { dg-final { scan-assembler-times "blsmsk\[^\\n]*eax" 2 } } */ @@ -16,6 +16,12 @@ func_andn32 (unsigned int X, unsigned int Y) } unsigned int +func_andn32_2 (unsigned int X, unsigned int Y) +{ + return _andn_u32(X, Y); +} + +unsigned int func_bextr32 (unsigned int X, unsigned int Y) { return __bextr_u32(X, Y); diff --git a/gcc/testsuite/gcc.target/i386/bmi-2.c b/gcc/testsuite/gcc.target/i386/bmi-2.c index 6b8595e..3f9052a 100644 --- a/gcc/testsuite/gcc.target/i386/bmi-2.c +++ b/gcc/testsuite/gcc.target/i386/bmi-2.c @@ -1,6 +1,6 @@ /* { dg-do compile { target { ! 
ia32 } } } */ /* { dg-options "-O2 -fno-ipa-icf -mbmi " } */ -/* { dg-final { scan-assembler "andn\[^\\n]*rax" } } */ +/* { dg-final { scan-assembler-times "andn\[^\\n]*rax" 2 } } */ /* { dg-final { scan-assembler-times "bextr\[ \\t]+\[^\\n]*rax" 2 } } */ /* { dg-final { scan-assembler-times "blsi\[^\\n]*rax" 2 } } */ /* { dg-final { scan-assembler-times "blsmsk\[^\\n]*rax" 2 } } */ @@ -16,6 +16,12 @@ func_andn64 (unsigned long long X, unsigned long long Y) } unsigned long long +func_andn64_2 (unsigned long long X, unsigned long long Y) +{ + return _andn_u64 (X, Y); +} + +unsigned long long func_bextr64 (unsigned long long X, unsigned long long Y) { return __bextr_u64 (X, Y); diff --git a/gcc/testsuite/gcc.target/i386/bmi-3.c b/gcc/testsuite/gcc.target/i386/bmi-3.c index ddc5e0f..0b91bc2 100644 --- a/gcc/testsuite/gcc.target/i386/bmi-3.c +++ b/gcc/testsuite/gcc.target/i386/bmi-3.c @@ -1,6 +1,6 @@ /* { dg-do compile } */ /* { dg-options "-O2 -mbmi " } */ -/* { dg-final { scan-assembler "tzcntw\[^\\n]*(%|)ax" } } */ +/* { dg-final { scan-assembler-times "tzcntw\[^\\n]*%?ax" 2 } } */ #include <x86intrin.h> @@ -9,3 +9,9 @@ func_tzcnt16 (unsigned short X) { return __tzcnt_u16(X); } + +unsigned short +func_tzcnt16_2 (unsigned short X) +{ + return _tzcnt_u16(X); +} diff --git a/gcc/testsuite/gcc.target/i386/smuldi3_highpart.c b/gcc/testsuite/gcc.target/i386/smuldi3_highpart.c new file mode 100644 index 0000000..8bbd5f5 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/smuldi3_highpart.c @@ -0,0 +1,11 @@ +/* { dg-do compile { target int128 } } */ +/* { dg-options "-O2" } */ +typedef int __attribute ((mode(TI))) ti_t; + +long foo(long x) +{ + return ((ti_t)x * 19065) >> 72; +} + +/* { dg-final { scan-assembler "movl\[ \\t]+\\\$19065, %eax" } } */ +/* { dg-final { scan-assembler-times "movq" 1 } } */ diff --git a/gcc/testsuite/gdc.test/compilable/test22593.d b/gcc/testsuite/gdc.test/compilable/test22593.d new file mode 100644 index 0000000..2091294 --- /dev/null +++ b/gcc/testsuite/gdc.test/compilable/test22593.d @@ -0,0 +1,13 @@ +// https://issues.dlang.org/show_bug.cgi?id=22593 + +struct Foo(T){ + this(Rhs, this This)(scope Rhs rhs){ + } + + this(ref scope typeof(this) rhs){ + } +} + +struct Bar{ + Foo!int foo; +} diff --git a/gcc/testsuite/gdc.test/fail_compilation/ice17074.d b/gcc/testsuite/gdc.test/fail_compilation/ice17074.d index 53e75e4..84c4d85 100644 --- a/gcc/testsuite/gdc.test/fail_compilation/ice17074.d +++ b/gcc/testsuite/gdc.test/fail_compilation/ice17074.d @@ -1,19 +1,13 @@ /* -TEST_OUTPUT: ---- -fail_compilation/ice17074.d(9): Error: identifier expected for C++ namespace -fail_compilation/ice17074.d(9): Error: found `__overloadset` when expecting `)` -fail_compilation/ice17074.d(9): Error: declaration expected, not `)` ---- */ extern(C++, std.__overloadset) void ice_std_keyword(); /* TEST_OUTPUT: --- -fail_compilation/ice17074.d(19): Error: identifier expected for C++ namespace -fail_compilation/ice17074.d(19): Error: found `*` when expecting `)` -fail_compilation/ice17074.d(19): Error: declaration expected, not `)` +fail_compilation/ice17074.d(13): Error: identifier expected for C++ namespace +fail_compilation/ice17074.d(13): Error: found `*` when expecting `)` +fail_compilation/ice17074.d(13): Error: declaration expected, not `)` --- */ extern(C++, std.*) void ice_std_token(); diff --git a/gcc/testsuite/gdc.test/fail_compilation/test22593.d b/gcc/testsuite/gdc.test/fail_compilation/test22593.d new file mode 100644 index 0000000..f90287e --- /dev/null +++ 
b/gcc/testsuite/gdc.test/fail_compilation/test22593.d @@ -0,0 +1,23 @@ +// https://issues.dlang.org/show_bug.cgi?id=22593 + +/* +TEST_OUTPUT: +--- +fail_compilation/test22593.d(14): Error: Cannot define both an rvalue constructor and a copy constructor for `struct Foo` +fail_compilation/test22593.d(22): Template instance `__ctor!(immutable(Foo!int), immutable(Foo!int))` creates a rvalue constructor for `struct Foo` +fail_compilation/test22593.d(22): Error: template instance `test22593.Foo!int.Foo.__ctor!(immutable(Foo!int), immutable(Foo!int))` error instantiating +--- +*/ + +struct Foo(T) +{ + this(Rhs, this This)(scope Rhs rhs){} + + this(ref scope typeof(this) rhs){} +} + +void main() +{ + immutable Foo!int a; + a.__ctor(a); +} diff --git a/gcc/tree-ssa-loop-im.c b/gcc/tree-ssa-loop-im.c index 682406d..b952386 100644 --- a/gcc/tree-ssa-loop-im.c +++ b/gcc/tree-ssa-loop-im.c @@ -146,6 +146,11 @@ public: enum dep_kind { lim_raw, sm_war, sm_waw }; enum dep_state { dep_unknown, dep_independent, dep_dependent }; +/* Coldest outermost loop for a given loop. */ +vec<class loop *> coldest_outermost_loop; +/* Hotter outer loop nearest to a given loop. */ +vec<class loop *> hotter_than_inner_loop; + /* Populate the loop dependence cache of REF for LOOP, KIND with STATE. */ static void @@ -417,6 +422,63 @@ movement_possibility (gimple *stmt) return ret; } +/* Compare the profile counts of BB and LOOP's preheader; the comparison is + three-state as stated in profile-count.h, and FALSE is returned if the + inequality cannot be decided. */ +bool +bb_colder_than_loop_preheader (basic_block bb, class loop *loop) +{ + gcc_assert (bb && loop); + return bb->count < loop_preheader_edge (loop)->src->count; +} + +/* Find the coldest loop between OUTERMOST_LOOP and LOOP by comparing profile + counts. + It does a three-step check: + 1) Check whether CURR_BB is cold in its own loop_father; if it is cold, just + return NULL, which means the statement should not be moved out at all; + 2) If CURR_BB is NOT cold, check whether the pre-computed COLDEST_LOOP is + outside of OUTERMOST_LOOP; if it is inside of OUTERMOST_LOOP, return COLDEST_LOOP; + 3) If COLDEST_LOOP is outside of OUTERMOST_LOOP, check whether there is a + hotter loop between OUTERMOST_LOOP and LOOP in the pre-computed + HOTTER_THAN_INNER_LOOP; if so, return its nested inner loop, otherwise return + OUTERMOST_LOOP. + Finally, when COLDEST_LOOP is inside of OUTERMOST_LOOP, just return it as + the hoist target. */ + +static class loop * +get_coldest_out_loop (class loop *outermost_loop, class loop *loop, + basic_block curr_bb) +{ + gcc_assert (outermost_loop == loop + || flow_loop_nested_p (outermost_loop, loop)); + + /* If bb_colder_than_loop_preheader returns false due to the three-state + comparison, OUTERMOST_LOOP is returned in the end to preserve the behavior. + Otherwise, return the coldest loop between OUTERMOST_LOOP and LOOP. */ + if (curr_bb && bb_colder_than_loop_preheader (curr_bb, loop)) + return NULL; + + class loop *coldest_loop = coldest_outermost_loop[loop->num]; + if (loop_depth (coldest_loop) < loop_depth (outermost_loop)) + { + class loop *hotter_loop = hotter_than_inner_loop[loop->num]; + if (!hotter_loop + || loop_depth (hotter_loop) < loop_depth (outermost_loop)) + return outermost_loop; + + /* hotter_loop is between OUTERMOST_LOOP and LOOP like: + [loop tree root, ..., coldest_loop, ..., outermost_loop, ..., + hotter_loop, second_coldest_loop, ..., loop] + so return second_coldest_loop as the hoist target.
*/ + class loop *aloop; + for (aloop = hotter_loop->inner; aloop; aloop = aloop->next) + if (aloop == loop || flow_loop_nested_p (aloop, loop)) + return aloop; + } + return coldest_loop; +} + /* Suppose that operand DEF is used inside the LOOP. Returns the outermost loop to that we could move the expression using DEF if it did not have other operands, i.e. the outermost loop enclosing LOOP in that the value @@ -685,7 +747,9 @@ determine_max_movement (gimple *stmt, bool must_preserve_exec) level = ALWAYS_EXECUTED_IN (bb); else level = superloop_at_depth (loop, 1); - lim_data->max_loop = level; + lim_data->max_loop = get_coldest_out_loop (level, loop, bb); + if (!lim_data->max_loop) + return false; if (gphi *phi = dyn_cast <gphi *> (stmt)) { @@ -1217,7 +1281,10 @@ move_computations_worker (basic_block bb) /* We do not really want to move conditionals out of the loop; we just placed it here to force its operands to be moved if necessary. */ if (gimple_code (stmt) == GIMPLE_COND) - continue; + { + gsi_next (&bsi); + continue; + } if (dump_file && (dump_flags & TDF_DETAILS)) { @@ -3023,6 +3090,26 @@ ref_indep_loop_p (class loop *loop, im_mem_ref *ref, dep_kind kind) return indep_p; } +class ref_in_loop_hot_body +{ +public: + ref_in_loop_hot_body (class loop *loop_) : l (loop_) {} + bool operator () (mem_ref_loc *loc); + class loop *l; +}; + +/* Check for the coldest loop between loop L and the innermost loop. If there + is a cold loop between L and INNER_LOOP, store motion can be performed; + with no cold loop there is no store motion. get_coldest_out_loop also + handles the case where L is INNER_LOOP. */ +bool +ref_in_loop_hot_body::operator () (mem_ref_loc *loc) +{ + basic_block curr_bb = gimple_bb (loc->stmt); + class loop *inner_loop = curr_bb->loop_father; + return get_coldest_out_loop (l, inner_loop, curr_bb); +} + /* Returns true if we can perform store motion of REF from LOOP. */ @@ -3077,6 +3164,12 @@ can_sm_ref_p (class loop *loop, im_mem_ref *ref) if (!ref_indep_loop_p (loop, ref, sm_war)) return false; + /* Verify whether the candidate is hot for LOOP. Only do store motion if the + candidate's profile count is hot. A statement in a cold BB shouldn't be + moved out of its loop_father. */ + if (!for_all_locs_in_loop (loop, ref, ref_in_loop_hot_body (loop))) + return false; + return true; } @@ -3289,6 +3382,48 @@ fill_always_executed_in (void) fill_always_executed_in_1 (loop, contains_call); } +/* Find the coldest loop preheader for LOOP, and also find the nearest hotter + loop to LOOP. Then recursively iterate over each inner loop.
*/ + +void +fill_coldest_and_hotter_out_loop (class loop *coldest_loop, + class loop *hotter_loop, class loop *loop) +{ + if (bb_colder_than_loop_preheader (loop_preheader_edge (loop)->src, + coldest_loop)) + coldest_loop = loop; + + coldest_outermost_loop[loop->num] = coldest_loop; + + hotter_than_inner_loop[loop->num] = NULL; + class loop *outer_loop = loop_outer (loop); + if (hotter_loop + && bb_colder_than_loop_preheader (loop_preheader_edge (loop)->src, + hotter_loop)) + hotter_than_inner_loop[loop->num] = hotter_loop; + + if (outer_loop && outer_loop != current_loops->tree_root + && bb_colder_than_loop_preheader (loop_preheader_edge (loop)->src, + outer_loop)) + hotter_than_inner_loop[loop->num] = outer_loop; + + if (dump_enabled_p ()) + { + dump_printf (MSG_NOTE, "loop %d's coldest_outermost_loop is %d, ", + loop->num, coldest_loop->num); + if (hotter_than_inner_loop[loop->num]) + dump_printf (MSG_NOTE, "hotter_than_inner_loop is %d\n", + hotter_than_inner_loop[loop->num]->num); + else + dump_printf (MSG_NOTE, "hotter_than_inner_loop is NULL\n"); + } + + class loop *inner_loop; + for (inner_loop = loop->inner; inner_loop; inner_loop = inner_loop->next) + fill_coldest_and_hotter_out_loop (coldest_loop, + hotter_than_inner_loop[loop->num], + inner_loop); +} /* Compute the global information needed by the loop invariant motion pass. */ @@ -3373,6 +3508,9 @@ tree_ssa_lim_finalize (void) free_affine_expand_cache (&memory_accesses.ttae_cache); free (bb_loop_postorder); + + coldest_outermost_loop.release (); + hotter_than_inner_loop.release (); } /* Moves invariants from loops. Only "expensive" invariants are moved out -- @@ -3392,6 +3530,16 @@ loop_invariant_motion_in_fun (function *fun, bool store_motion) /* Fills ALWAYS_EXECUTED_IN information for basic blocks. */ fill_always_executed_in (); + /* Pre-compute coldest outermost loop and nearest hotter loop of each loop. + */ + class loop *loop; + coldest_outermost_loop.create (number_of_loops (cfun)); + coldest_outermost_loop.safe_grow_cleared (number_of_loops (cfun)); + hotter_than_inner_loop.create (number_of_loops (cfun)); + hotter_than_inner_loop.safe_grow_cleared (number_of_loops (cfun)); + for (loop = current_loops->tree_root->inner; loop != NULL; loop = loop->next) + fill_coldest_and_hotter_out_loop (loop, NULL, loop); + int *rpo = XNEWVEC (int, last_basic_block_for_fn (fun)); int n = pre_and_rev_post_order_compute_fn (fun, NULL, rpo, false); diff --git a/gcc/tree-ssa-loop-split.c b/gcc/tree-ssa-loop-split.c index 3f6ad04..3312806 100644 --- a/gcc/tree-ssa-loop-split.c +++ b/gcc/tree-ssa-loop-split.c @@ -575,7 +575,10 @@ split_loop (class loop *loop1) stmts2); tree cond = build2 (guard_code, boolean_type_node, guard_init, border); if (!initial_true) - cond = fold_build1 (TRUTH_NOT_EXPR, boolean_type_node, cond); + cond = fold_build1 (TRUTH_NOT_EXPR, boolean_type_node, cond); + + edge true_edge, false_edge; + extract_true_false_edges_from_block (bbs[i], &true_edge, &false_edge); /* Now version the loop, placing loop2 after loop1 connecting them, and fix up SSA form for that. 
*/ @@ -583,11 +586,11 @@ basic_block cond_bb; class loop *loop2 = loop_version (loop1, cond, &cond_bb, - profile_probability::always (), - profile_probability::always (), - profile_probability::always (), - profile_probability::always (), - true); + true_edge->probability, + true_edge->probability.invert (), + profile_probability::always (), + profile_probability::always (), + true); gcc_assert (loop2); edge new_e = connect_loops (loop1, loop2); @@ -607,6 +610,38 @@ split_loop (class loop *loop1) tree guard_next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop1)); patch_loop_exit (loop1, guard_stmt, guard_next, newend, initial_true); + update_ssa (TODO_update_ssa); + + /* Scale the first loop's bb counts in proportion, except those dominated + by the true branch, to avoid dropping counts of 1 down to 0. */ + basic_block *bbs1, *bbs2; + bbs1 = get_loop_body (loop1); + unsigned j; + for (j = 0; j < loop1->num_nodes; j++) + if (bbs1[j] == loop1->latch + || !dominated_by_p (CDI_DOMINATORS, bbs1[j], true_edge->dest)) + bbs1[j]->count + = bbs1[j]->count.apply_probability (true_edge->probability); + free (bbs1); + + /* Fix the first loop's exit probability after scaling. */ + edge exit_to_latch1 = single_pred_edge (loop1->latch); + exit_to_latch1->probability = exit_to_latch1->probability.apply_scale ( + true_edge->probability.to_reg_br_prob_base (), REG_BR_PROB_BASE); + single_exit (loop1)->probability + = exit_to_latch1->probability.invert (); + + /* Scale the second loop's bb counts in proportion, except those dominated + by the false branch, to avoid dropping counts of 1 down to 0. */ + basic_block bbi_copy = get_bb_copy (false_edge->dest); + bbs2 = get_loop_body (loop2); + for (j = 0; j < loop2->num_nodes; j++) + if (bbs2[j] == loop2->latch + || !dominated_by_p (CDI_DOMINATORS, bbs2[j], bbi_copy)) + bbs2[j]->count = bbs2[j]->count.apply_probability ( + true_edge->probability.invert ()); + free (bbs2); + /* Finally patch out the two copies of the condition to be always true/false (or opposite). */ gcond *force_true = as_a<gcond *> (last_stmt (bbs[i])); @@ -1486,8 +1521,8 @@ do_split_loop_on_cond (struct loop *loop1, edge invar_branch) initialize_original_copy_tables (); struct loop *loop2 = loop_version (loop1, boolean_true_node, NULL, - profile_probability::always (), - profile_probability::never (), + invar_branch->probability.invert (), + invar_branch->probability, profile_probability::always (), profile_probability::always (), true); @@ -1535,6 +1570,40 @@ do_split_loop_on_cond (struct loop *loop1, edge invar_branch) between loop1 and loop2. */ connect_loop_phis (loop1, loop2, to_loop2); + update_ssa (TODO_update_ssa); + + edge true_edge, false_edge, skip_edge1, skip_edge2; + extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge); + + /* Scale the first loop's bb counts in proportion, except those dominated + by the true branch, to avoid dropping counts of 1 down to 0. */ + skip_edge1 = true_invar ? false_edge : true_edge; + skip_edge2 = true_invar ? true_edge : false_edge; + basic_block *bbs1, *bbs2; + bbs1 = get_loop_body (loop1); + unsigned j; + for (j = 0; j < loop1->num_nodes; j++) + if (bbs1[j] == loop1->latch + || !dominated_by_p (CDI_DOMINATORS, bbs1[j], skip_edge1->dest)) + bbs1[j]->count + = bbs1[j]->count.apply_probability (skip_edge1->probability); + free (bbs1); + + /* Fix the first loop's exit probability after scaling. */ + to_loop1->probability = invar_branch->probability.invert (); + to_loop2->probability = invar_branch->probability; + + /* Scale the second loop's bb counts in proportion, except those dominated + by the false branch, to avoid dropping counts of 1 down to 0.
*/ + basic_block bbi_copy = get_bb_copy (skip_edge2->dest); + bbs2 = get_loop_body (loop2); + for (j = 0; j < loop2->num_nodes; j++) + if (bbs2[j] == loop2->latch + || !dominated_by_p (CDI_DOMINATORS, bbs2[j], bbi_copy)) + bbs2[j]->count + = bbs2[j]->count.apply_probability (skip_edge2->probability); + free (bbs2); + free_original_copy_tables (); return true; diff --git a/libcpp/po/ChangeLog b/libcpp/po/ChangeLog index 843a84b..54ca87f 100644 --- a/libcpp/po/ChangeLog +++ b/libcpp/po/ChangeLog @@ -1,3 +1,7 @@ +2021-12-20 Joseph Myers <joseph@codesourcery.com> + + * es.po: Update. + 2021-08-16 Joseph Myers <joseph@codesourcery.com> * de.po: Update. diff --git a/libcpp/po/es.po b/libcpp/po/es.po index fde067a..bd50130 100644 --- a/libcpp/po/es.po +++ b/libcpp/po/es.po @@ -6,10 +6,10 @@ # Antonio Ceballos Roa <aceballos@gmail.com>, 2021. msgid "" msgstr "" -"Project-Id-Version: cpplib 10.1-b20200209\n" +"Project-Id-Version: cpplib 11.1-b20210207\n" "Report-Msgid-Bugs-To: https://gcc.gnu.org/bugs/\n" "POT-Creation-Date: 2021-02-05 21:38+0000\n" -"PO-Revision-Date: 2021-01-07 11:33+0100\n" +"PO-Revision-Date: 2021-12-18 09:17+0100\n" "Last-Translator: Antonio Ceballos Roa <aceballos@gmail.com>\n" "Language-Team: Spanish <es@tp.org.es>\n" "Language: es\n" @@ -491,36 +491,28 @@ msgid "use of C99 long long integer constant" msgstr "uso de una constante entera long long C99" #: expr.c:822 -#, fuzzy -#| msgid "use of C++11 long long integer constant" msgid "use of C++23 %<size_t%> integer constant" -msgstr "uso de una constante entera long long C++11" +msgstr "uso de constante entera %<size_t%> de C++23" #: expr.c:823 -#, fuzzy -#| msgid "use of C++11 long long integer constant" msgid "use of C++23 %<make_signed_t<size_t>%> integer constant" -msgstr "uso de una constante entera long long C++11" +msgstr "uso de constante entera %<make_signed_t<size_t>%> de C++23" #: expr.c:834 msgid "imaginary constants are a GCC extension" -msgstr "las constantes imaginarias son una extensión GCC" +msgstr "las constantes imaginarias son una extensión de GCC" #: expr.c:841 msgid "binary constants are a C++14 feature or GCC extension" -msgstr "las constantes binarias son una característica C++14 o extensión GCC" +msgstr "las constantes binarias son una característica C++14 o una extensión de GCC" #: expr.c:843 -#, fuzzy -#| msgid "binary constants are a C++14 feature or GCC extension" msgid "binary constants are a C2X feature or GCC extension" -msgstr "las constantes binarias son una característica C++14 o extensión GCC" +msgstr "las constantes binarias son una característica de C2X o una extensión de GCC" #: expr.c:848 -#, fuzzy -#| msgid "decimal float constants are a C2X feature" msgid "binary constants are a C2X feature" -msgstr "las constantes de coma flotante decimal son una característica de C2X" +msgstr "las constantes binarias son una característica de C2X" #: expr.c:944 msgid "integer constant is too large for its type" @@ -713,16 +705,12 @@ msgid "`%.*s' is not in NFC" msgstr "`%.*s' no está en NFC" #: lex.c:1375 -#, fuzzy -#| msgid "__VA_OPT__ is not available until C++2a" msgid "__VA_OPT__ is not available until C++20" -msgstr "__VA_OPT__ no está disponible bajo C++2a" +msgstr "__VA_OPT__ no está disponible hasta C++20" #: lex.c:1382 -#, fuzzy -#| msgid "__VA_OPT__ can only appear in the expansion of a C++2a variadic macro" msgid "__VA_OPT__ can only appear in the expansion of a C++20 variadic macro" -msgstr "__VA_OPT__ solamente puede aparecer en la expansión de una macro variadic C++2a" +msgstr 
"__VA_OPT__ solamente puede aparecer en la expansión de una macro variadic de C++20" #: lex.c:1413 lex.c:1506 #, c-format @@ -778,12 +766,12 @@ msgstr "C++11 requiere un espacio entre cadena literal y macro" #: lex.c:2711 msgid "module control-line cannot be in included file" -msgstr "" +msgstr "la línea de control del módulo no puede estar en un fichero incluido" #: lex.c:2725 #, c-format msgid "module control-line \"%s\" cannot be an object-like macro" -msgstr "" +msgstr "la línea de control del módulo «%s» no puede ser una macro de tipo objeto" #: lex.c:3099 lex.c:4472 traditional.c:174 msgid "unterminated comment" @@ -811,16 +799,13 @@ msgid "unspellable token %s" msgstr "elemento %s impronunciable" #: lex.c:4627 -#, fuzzy, c-format -#| msgid "raw string delimiter longer than 16 characters" +#, c-format msgid "raw string delimiter longer than %d characters" -msgstr "el delimitador de cadena cruda es más largo que 16 caracteres" +msgstr "el delimitador de cadena cruda es mayor de %d caracteres" #: lex.c:4697 -#, fuzzy -#| msgid "unterminated #%s" msgid "unterminated literal" -msgstr "#%s sin terminar" +msgstr "literal sin terminar" #: macro.c:94 msgid "'##' cannot appear at either end of __VA_OPT__" diff --git a/libphobos/ChangeLog b/libphobos/ChangeLog index e07738f..30bd0a9 100644 --- a/libphobos/ChangeLog +++ b/libphobos/ChangeLog @@ -1,3 +1,12 @@ +2021-12-21 Iain Buclaw <ibuclaw@gdcproject.org> + + * configure.tgt: Add power*-*-freebsd* as a supported target. + +2021-12-20 Iain Buclaw <ibuclaw@gdcproject.org> + + * libdruntime/MERGE: Merge upstream druntime fd9a4544. + * src/MERGE: Merge upstream phobos 495e835c2. + 2021-12-15 Iain Buclaw <ibuclaw@gdcproject.org> PR d/103604 diff --git a/libphobos/configure.tgt b/libphobos/configure.tgt index 88c027d..0643dae 100644 --- a/libphobos/configure.tgt +++ b/libphobos/configure.tgt @@ -39,6 +39,9 @@ case "${target}" in mips*-*-linux*) LIBPHOBOS_SUPPORTED=yes ;; + power*-*-freebsd*) + LIBPHOBOS_SUPPORTED=yes + ;; power*-*-linux*) LIBPHOBOS_SUPPORTED=yes LIBDRUNTIME_ONLY=yes diff --git a/libphobos/libdruntime/MERGE b/libphobos/libdruntime/MERGE index b3da906..70f7ff5 100644 --- a/libphobos/libdruntime/MERGE +++ b/libphobos/libdruntime/MERGE @@ -1,4 +1,4 @@ -6364e010bc87f3621028c8ac648133535c126fb3 +fd9a45448244fb9dd4326520ad8526c540895eb0 The first line of this file holds the git revision number of the last merge done from the dlang/druntime repository. diff --git a/libphobos/libdruntime/core/builtins.d b/libphobos/libdruntime/core/builtins.d index f2ca503..1ed80f7 100644 --- a/libphobos/libdruntime/core/builtins.d +++ b/libphobos/libdruntime/core/builtins.d @@ -1,11 +1,45 @@ /********************************************** - * This module implements common builtins for the D frontend. - * - * Copyright: Copyright © 2019, The D Language Foundation - * License: $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost License 1.0) - * Authors: Walter Bright - * Source: $(DRUNTIMESRC core/builtins.d) - */ +To provide access to features that would be otherwise counterproductive or +difficult to implement, compilers provide an interface consisting of a set +of builtins (also called intrinsics) which can be called like normal functions. + +This module exposes builtins both common to all D compilers +(those provided by the frontend) and specific to the host compiler i.e. those +specific to either LLVM or GCC (`ldc.intrinsics` and `gcc.builtins` are publicly imported, respectively). 
+Host-specific intrinsics cannot be reliably listed here, however listings can be found +at the documentation for the relevant backends, i.e. +$(LINK2 https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html, GCC) and +$(LINK2 https://llvm.org/docs/LangRef.html, LLVM). It should be noted that not all +builtins listed are necessarily supported by the host compiler, please file a bug +if this is the case for your workload. + +Use of this module reduces the amount of conditional compilation needed +to use a given builtin. For example, to write a target independent function +that uses prefetching we can write the following: +--- +float usePrefetch(float[] x) +{ + // There is only one import statement required rather than two (versioned) imports + import core.builtins; + version (GNU) + __builtin_prefetch(x.ptr); + version (LDC) + /+ + For the curious: 0, 3, 1 mean `x` will only be read-from (0), it will be used + very often (3), and it should be fetched to the data-cache (1). + +/ + llvm_prefetch(x.ptr, 0, 3, 1); + const doMath = blahBlahBlah; + return doMath; +} +--- + + +Copyright: Copyright © 2021, The D Language Foundation +License: $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost License 1.0) +Authors: Walter Bright +Source: $(DRUNTIMESRC core/builtins.d) +*/ module core.builtins; diff --git a/libphobos/libdruntime/core/lifetime.d b/libphobos/libdruntime/core/lifetime.d index 8fb61a5..9a99f2d 100644 --- a/libphobos/libdruntime/core/lifetime.d +++ b/libphobos/libdruntime/core/lifetime.d @@ -1545,9 +1545,8 @@ template forward(args...) { import core.internal.traits : AliasSeq; - static if (args.length) + template fwd(alias arg) { - alias arg = args[0]; // by ref || lazy || const/immutable static if (__traits(isRef, arg) || __traits(isOut, arg) || @@ -1556,15 +1555,16 @@ template forward(args...) alias fwd = arg; // (r)value else - @property auto fwd(){ return move(arg); } - - static if (args.length == 1) - alias forward = fwd; - else - alias forward = AliasSeq!(fwd, forward!(args[1..$])); + @property auto fwd(){ pragma(inline, true); return move(arg); } } + + alias Result = AliasSeq!(); + static foreach (arg; args) + Result = AliasSeq!(Result, fwd!arg); + static if (Result.length == 1) + alias forward = Result[0]; else - alias forward = AliasSeq!(); + alias forward = Result; } /// @@ -2316,7 +2316,7 @@ template _d_delstructImpl(T) @system pure nothrow unittest { int dtors = 0; - struct S { ~this() { ++dtors; } } + struct S { ~this() nothrow { ++dtors; } } S *s = new S(); _d_delstructImpl!(typeof(s))._d_delstruct(s); diff --git a/libphobos/libdruntime/core/sys/linux/sched.d b/libphobos/libdruntime/core/sys/linux/sched.d index dc815a0..e828b74 100644 --- a/libphobos/libdruntime/core/sys/linux/sched.d +++ b/libphobos/libdruntime/core/sys/linux/sched.d @@ -153,6 +153,9 @@ version (CRuntime_Glibc) int sched_getcpu(); } +/* Reassociate the calling thread with namespace referred to by fd */ +int setns(int fd, int nstype); + enum CLONE_FILES = 0x400; enum CLONE_FS = 0x200; enum CLONE_NEWCGROUP = 0x2000000; diff --git a/libphobos/libdruntime/object.d b/libphobos/libdruntime/object.d index 29b5d58..c989caa 100644 --- a/libphobos/libdruntime/object.d +++ b/libphobos/libdruntime/object.d @@ -4667,17 +4667,33 @@ public import core.internal.switch_: __switch_error; public @trusted @nogc nothrow pure extern (C) void _d_delThrowable(scope Throwable); // Compare class and interface objects for ordering. 
-private int __cmp(Obj)(Obj lhs, Obj rhs) -if (is(Obj : Object)) +int __cmp(C1, C2)(C1 lhs, C2 rhs) +if ((is(C1 : const(Object)) || (is(C1 == interface) && (__traits(getLinkage, C1) == "D"))) && + (is(C2 : const(Object)) || (is(C2 == interface) && (__traits(getLinkage, C2) == "D")))) { - if (lhs is rhs) + static if (is(C1 == typeof(null)) && is(C2 == typeof(null))) + { return 0; - // Regard null references as always being "less than" - if (!lhs) + } + else static if (is(C1 == typeof(null))) + { + // Regard null references as always being "less than" return -1; - if (!rhs) + } + else static if (is(C2 == typeof(null))) + { return 1; - return lhs.opCmp(rhs); + } + else + { + if (lhs is rhs) + return 0; + if (lhs is null) + return -1; + if (rhs is null) + return 1; + return lhs.opCmp(rhs); + } } // objects diff --git a/libphobos/src/MERGE b/libphobos/src/MERGE index c9d166b..b517749 100644 --- a/libphobos/src/MERGE +++ b/libphobos/src/MERGE @@ -1,4 +1,4 @@ -575b67a9b4f78415f96ca77ad50b2de4c667cc74 +495e835c2da47606142ff24c85de707e3b955a9a The first line of this file holds the git revision number of the last merge done from the dlang/phobos repository. diff --git a/libphobos/src/std/format/write.d b/libphobos/src/std/format/write.d index c758768..e67d95c 100644 --- a/libphobos/src/std/format/write.d +++ b/libphobos/src/std/format/write.d @@ -1287,3 +1287,26 @@ void formatValue(Writer, T, Char)(auto ref Writer w, auto ref T val, scope const assertThrown!FormatException(formattedWrite(w, "%(%0*d%)", new int[1])); } + +// https://issues.dlang.org/show_bug.cgi?id=22609 +@safe pure unittest +{ + static enum State: ubyte { INACTIVE } + static struct S { + State state = State.INACTIVE; + int generation = 1; + alias state this; + // DMDBUG: https://issues.dlang.org/show_bug.cgi?id=16657 + auto opEquals(S other) const { return state == other.state && generation == other.generation; } + auto opEquals(State other) const { return state == other; } + } + + import std.array : appender; + import std.format.spec : singleSpec; + + auto writer = appender!string(); + const spec = singleSpec("%s"); + S a; + writer.formatValue(a, spec); + assert(writer.data == "0"); +} diff --git a/libphobos/src/std/range/interfaces.d b/libphobos/src/std/range/interfaces.d index 475f35b..6d55d414 100644 --- a/libphobos/src/std/range/interfaces.d +++ b/libphobos/src/std/range/interfaces.d @@ -201,6 +201,9 @@ interface RandomAccessFinite(E) : BidirectionalRange!(E) { /**Interface for an infinite random access range of type `E`.*/ interface RandomAccessInfinite(E) : ForwardRange!E { + /// + enum bool empty = false; + /**Calls $(REF moveAt, std, range, primitives) on the wrapped range, if * possible. Otherwise, throws an $(LREF UnsupportedRangeMethod) exception. */ @@ -213,6 +216,12 @@ interface RandomAccessInfinite(E) : ForwardRange!E { E opIndex(size_t); } +// https://issues.dlang.org/show_bug.cgi?id=22608 +@safe unittest +{ + static assert(isRandomAccessRange!(RandomAccessInfinite!int)); +} + /**Adds assignable elements to InputRange.*/ interface InputAssignable(E) : InputRange!E { /// diff --git a/libphobos/src/std/typecons.d b/libphobos/src/std/typecons.d index 6dee863..cde2b9d 100644 --- a/libphobos/src/std/typecons.d +++ b/libphobos/src/std/typecons.d @@ -6971,7 +6971,7 @@ mixin template Proxy(alias a) static if (is(typeof(a.opCmp(b)))) return a.opCmp(b); else static if (is(typeof(b.opCmp(a)))) - return -b.opCmp(b); + return -b.opCmp(a); else return a < b ? -1 : a > b ? +1 : 0; } |
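A note on the BMI test changes above (bmi-1.c, bmi-2.c, bmi-3.c): they only bump the scan-assembler counts to cover the new _andn_u32, _andn_u64 and _tzcnt_u16 spellings added to bmiintrin.h. For reference, the sketch below shows the semantics those intrinsics are expected to have, written as portable C; the ref_* helpers and the sample values are illustrative stand-ins, not the intrinsics themselves.

/* Portable reference semantics for the intrinsics exercised by the new test
   functions; assumed behaviour: _andn_u32 (x, y) == ~x & y and
   _tzcnt_u16 (x) == number of trailing zero bits of x, 16 when x == 0.  */
#include <stdio.h>

static unsigned int
ref_andn_u32 (unsigned int x, unsigned int y)
{
  return ~x & y;            /* what ANDN computes */
}

static unsigned short
ref_tzcnt_u16 (unsigned short x)
{
  unsigned short n = 0;
  if (x == 0)
    return 16;              /* TZCNT returns the operand width for 0 */
  while ((x & 1) == 0)
    {
      x >>= 1;
      n++;
    }
  return n;
}

int
main (void)
{
  printf ("%#x\n", ref_andn_u32 (0xf0f0f0f0u, 0xffffffffu)); /* 0xf0f0f0f */
  printf ("%u\n", ref_tzcnt_u16 (0x0008));                   /* 3 */
  return 0;
}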
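The tree-ssa-loop-im.c hunks above add a "coldest outermost loop" heuristic: an invariant is hoisted no further out than the coldest preheader between the outermost legal level and its own loop, and it is not moved at all when its own block is colder than its loop's preheader. The standalone sketch below models just the selection step; struct model_loop, coldest_between and the profile numbers are made up for illustration and are not GCC internals.

/* A toy model of choosing the hoist target: walk from the statement's loop
   out to the outermost legal loop and keep the loop whose preheader has the
   smallest profile count.  */
#include <stdio.h>

struct model_loop
{
  int num;                      /* loop number, for printing only */
  long preheader_count;         /* profile count of the loop preheader */
  struct model_loop *outer;     /* enclosing loop, NULL for the outermost */
};

static struct model_loop *
coldest_between (struct model_loop *outermost, struct model_loop *loop)
{
  struct model_loop *coldest = loop;
  for (struct model_loop *l = loop; ; l = l->outer)
    {
      if (l->preheader_count < coldest->preheader_count)
        coldest = l;
      if (l == outermost)
        break;
    }
  return coldest;
}

int
main (void)
{
  /* loop1 (hot, count 100) contains loop2 (guarded by an unlikely branch,
     count 2), which contains loop3 (hot again, count 200).  */
  struct model_loop loop1 = { 1, 100, NULL };
  struct model_loop loop2 = { 2, 2, &loop1 };
  struct model_loop loop3 = { 3, 200, &loop2 };

  /* An invariant in loop3 may legally move up to loop1's preheader, but the
     coldest place on the way out is loop2's preheader, so hoist only there.  */
  printf ("hoist to preheader of loop %d\n",
          coldest_between (&loop1, &loop3)->num);
  return 0;
}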
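Similarly, the tree-ssa-loop-split.c hunks stop passing profile_probability::always () for both copies to loop_version and instead split the profile between them: the first copy's block counts are scaled by the guard's branch probability and the second copy's by its inverse, so the two copies together roughly preserve the original counts. A minimal numeric sketch of that bookkeeping, assuming a probability expressed in basis points and a hypothetical apply_probability helper, is shown below.

/* Scale COUNT by PROB_BP/10000 with rounding, the way a block count is
   apportioned between the two loop copies after versioning.  */
#include <stdio.h>

static long
apply_probability (long count, int prob_bp)
{
  return (count * prob_bp + 5000) / 10000;
}

int
main (void)
{
  long body_count = 1000;   /* count of a block in the original loop body */
  int true_prob = 7000;     /* the split condition is true 70% of the time */

  long loop1_body = apply_probability (body_count, true_prob);
  long loop2_body = apply_probability (body_count, 10000 - true_prob);

  /* 700 + 300 == 1000: the copies share the original profile instead of
     each claiming all of it.  */
  printf ("loop1: %ld, loop2: %ld, total: %ld\n",
          loop1_body, loop2_body, loop1_body + loop2_body);
  return 0;
}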