From 49e651990a6966936a0273138dd56ac394e57b16 Mon Sep 17 00:00:00 2001 From: Richard Sandiford Date: Fri, 16 Apr 2021 12:38:02 +0100 Subject: Mark untyped calls and handle them specially [PR98689] This patch fixes a regression introduced by the rtl-ssa patches. It was seen on HPPA but it might be latent elsewhere. The problem is that the traditional way of expanding an untyped_call is to emit sequences like: (call (mem (symbol_ref "foo"))) (set (reg pseudo1) (reg result1)) ... (set (reg pseudon) (reg resultn)) The ABI specifies that result1..resultn are clobbered by the call but nothing in the RTL indicates that result1..resultn are the results of the call. Normally, using a clobbered value gives undefined results, but in this case the results are well-defined and matter for correctness. This seems like a niche case, so I think it would be better to mark it explicitly rather than try to detect it heuristically. Note that in expand_builtin_apply we already have an rtx_insn *, so it doesn't matter whether we call emit_call_insn or emit_insn. Calling emit_insn seems more natural now that the gen_* call has been split out. It also matches later code in the function. gcc/ PR rtl-optimization/98689 * reg-notes.def (UNTYPED_CALL): New note. * combine.c (distribute_notes): Handle it. * emit-rtl.c (try_split): Likewise. * rtlanal.c (rtx_properties::try_to_add_insn): Likewise. Assume that calls with the note implicitly set all return value registers. * builtins.c (expand_builtin_apply): Add a REG_UNTYPED_CALL to untyped_calls. --- gcc/builtins.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'gcc/builtins.c') diff --git a/gcc/builtins.c b/gcc/builtins.c index 196dda3..d30c4eb6 100644 --- a/gcc/builtins.c +++ b/gcc/builtins.c @@ -2490,8 +2490,12 @@ expand_builtin_apply (rtx function, rtx arguments, rtx argsize) if (targetm.have_untyped_call ()) { rtx mem = gen_rtx_MEM (FUNCTION_MODE, function); - emit_call_insn (targetm.gen_untyped_call (mem, result, - result_vector (1, result))); + rtx_insn *seq = targetm.gen_untyped_call (mem, result, + result_vector (1, result)); + for (rtx_insn *insn = seq; insn; insn = NEXT_INSN (insn)) + if (CALL_P (insn)) + add_reg_note (insn, REG_UNTYPED_CALL, NULL_RTX); + emit_insn (seq); } else if (targetm.have_call_value ()) { -- cgit v1.1 From 3bb6a9c01f1e9b5daf9b37fca57e90804ba90d66 Mon Sep 17 00:00:00 2001 From: Martin Liska Date: Thu, 22 Apr 2021 11:32:29 +0200 Subject: Fix various typos. PR testsuite/100159 PR testsuite/100192 gcc/ChangeLog: * builtins.c (expand_builtin): Fix typos and missing comments. * dwarf2out.c (gen_subprogram_die): Likewise. (gen_struct_or_union_type_die): Likewise. gcc/fortran/ChangeLog: * frontend-passes.c (optimize_expr): Fix typos and missing comments. gcc/testsuite/ChangeLog: * g++.dg/template/nontype29.C: Fix typos and missing comments. * gcc.dg/Warray-bounds-64.c: Likewise. * gcc.dg/Warray-parameter.c: Likewise. * gcc.dg/Wstring-compare.c: Likewise. * gcc.dg/format/gcc_diag-11.c: Likewise. * gfortran.dg/array_constructor_3.f90: Likewise. * gfortran.dg/matmul_bounds_9.f90: Likewise. * gfortran.dg/pr78033.f90: Likewise. * gfortran.dg/pr96325.f90: Likewise. --- gcc/builtins.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'gcc/builtins.c') diff --git a/gcc/builtins.c b/gcc/builtins.c index d30c4eb6..8c5324b 100644 --- a/gcc/builtins.c +++ b/gcc/builtins.c @@ -9986,7 +9986,7 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode, break; /* Expand it as BUILT_IN_MEMCMP_EQ first. 
If not successful, change it - back to a BUILT_IN_STRCMP. Remember to delete the 3rd paramater + back to a BUILT_IN_STRCMP. Remember to delete the 3rd parameter when changing it to a strcmp call. */ case BUILT_IN_STRCMP_EQ: target = expand_builtin_memcmp (exp, target, true); -- cgit v1.1 From 985b3a6837dee7001e6b618f073ed74f0edf5787 Mon Sep 17 00:00:00 2001 From: "H.J. Lu" Date: Mon, 10 Jun 2019 09:57:15 -0700 Subject: Generate offset adjusted operation for op_by_pieces operations Add an overlap_op_by_pieces_p target hook for op_by_pieces operations between two areas of memory to generate one offset adjusted operation in the smallest integer mode for the remaining bytes on the last piece operation of a memory region to avoid doing more than one smaller operations. Pass the RTL information from the previous iteration to m_constfn in op_by_pieces operation so that builtin_memset_[read|gen]_str can generate the new RTL from the previous RTL. Tested on Linux/x86-64. gcc/ PR middle-end/90773 * builtins.c (builtin_memcpy_read_str): Add a dummy argument. (builtin_strncpy_read_str): Likewise. (builtin_memset_read_str): Add an argument for the previous RTL information and generate the new RTL from the previous RTL info. (builtin_memset_gen_str): Likewise. * builtins.h (builtin_strncpy_read_str): Update the prototype. (builtin_memset_read_str): Likewise. * expr.c (by_pieces_ninsns): If targetm.overlap_op_by_pieces_p() returns true, round up size and alignment to the widest integer mode for maximum size. (pieces_addr::adjust): Add a pointer to by_pieces_prev argument and pass it to m_constfn. (op_by_pieces_d): Add m_push and m_overlap_op_by_pieces. (op_by_pieces_d::op_by_pieces_d): Add a bool argument to initialize m_push. Initialize m_overlap_op_by_pieces with targetm.overlap_op_by_pieces_p (). (op_by_pieces_d::run): Pass the previous RTL information to pieces_addr::adjust and generate overlapping operations if m_overlap_op_by_pieces is true. (PUSHG_P): New. (move_by_pieces_d::move_by_pieces_d): Updated for op_by_pieces_d change. (store_by_pieces_d::store_by_pieces_d): Updated for op_by_pieces_d change. (can_store_by_pieces): Use by_pieces_constfn on constfun. (store_by_pieces): Use by_pieces_constfn on constfun. Updated for op_by_pieces_d change. (clear_by_pieces_1): Add a dummy argument. (clear_by_pieces): Updated for op_by_pieces_d change. (compare_by_pieces_d::compare_by_pieces_d): Likewise. (string_cst_read_str): Add a dummy argument. * expr.h (by_pieces_constfn): Add a dummy argument. (by_pieces_prev): New. * target.def (overlap_op_by_pieces_p): New target hook. * config/i386/i386.c (TARGET_OVERLAP_OP_BY_PIECES_P): New. * doc/tm.texi.in: Add TARGET_OVERLAP_OP_BY_PIECES_P. * doc/tm.texi: Regenerated. gcc/testsuite/ PR middle-end/90773 * g++.dg/pr90773-1.h: New test. * g++.dg/pr90773-1a.C: Likewise. * g++.dg/pr90773-1b.C: Likewise. * g++.dg/pr90773-1c.C: Likewise. * g++.dg/pr90773-1d.C: Likewise. * gcc.target/i386/pr90773-1.c: Likewise. * gcc.target/i386/pr90773-2.c: Likewise. * gcc.target/i386/pr90773-3.c: Likewise. * gcc.target/i386/pr90773-4.c: Likewise. * gcc.target/i386/pr90773-5.c: Likewise. * gcc.target/i386/pr90773-6.c: Likewise. * gcc.target/i386/pr90773-7.c: Likewise. * gcc.target/i386/pr90773-8.c: Likewise. * gcc.target/i386/pr90773-9.c: Likewise. * gcc.target/i386/pr90773-10.c: Likewise. * gcc.target/i386/pr90773-11.c: Likewise. * gcc.target/i386/pr90773-12.c: Likewise. * gcc.target/i386/pr90773-13.c: Likewise. * gcc.target/i386/pr90773-14.c: Likewise. 
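Before the diff: a minimal sketch in plain C of what the new hook permits (illustrative only, not part of the patch; set14 is an invented name). With TARGET_OVERLAP_OP_BY_PIECES_P returning true, memset (p, 1, 14) on a target with 8-byte pieces can be expanded as two overlapping 8-byte stores instead of an 8-, a 4- and a 2-byte store:

    /* Source-level picture of the expansion (illustrative, not GCC code):
       the tail is covered by one 8-byte store pulled back so that it ends
       exactly at byte 14.  */
    void
    set14 (char *p)
    {
      unsigned long long v = 0x0101010101010101ULL;
      __builtin_memcpy (p, &v, 8);      /* bytes 0..7 */
      __builtin_memcpy (p + 6, &v, 8);  /* bytes 6..13; the two overlapping
                                           bytes are rewritten with the same
                                           values, so the overlap is harmless */
    }

The overlapping store is the "offset adjusted operation in the smallest integer mode" described above; passing the previous iteration's RTL to m_constfn is what lets builtin_memset_gen_str reuse (or take a subreg of) the wider constant instead of regenerating it.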
--- gcc/builtins.c | 36 ++++++++++++++++++++++++++++-------- 1 file changed, 28 insertions(+), 8 deletions(-) (limited to 'gcc/builtins.c') diff --git a/gcc/builtins.c b/gcc/builtins.c index 8c5324b..2d6bf4a 100644 --- a/gcc/builtins.c +++ b/gcc/builtins.c @@ -128,7 +128,6 @@ static rtx expand_builtin_va_copy (tree); static rtx inline_expand_builtin_bytecmp (tree, rtx); static rtx expand_builtin_strcmp (tree, rtx); static rtx expand_builtin_strncmp (tree, rtx, machine_mode); -static rtx builtin_memcpy_read_str (void *, HOST_WIDE_INT, scalar_int_mode); static rtx expand_builtin_memchr (tree, rtx); static rtx expand_builtin_memcpy (tree, rtx); static rtx expand_builtin_memory_copy_args (tree dest, tree src, tree len, @@ -145,7 +144,6 @@ static rtx expand_builtin_stpcpy (tree, rtx, machine_mode); static rtx expand_builtin_stpncpy (tree, rtx); static rtx expand_builtin_strncat (tree, rtx); static rtx expand_builtin_strncpy (tree, rtx); -static rtx builtin_memset_gen_str (void *, HOST_WIDE_INT, scalar_int_mode); static rtx expand_builtin_memset (tree, rtx, machine_mode); static rtx expand_builtin_memset_args (tree, tree, tree, rtx, machine_mode, tree); static rtx expand_builtin_bzero (tree); @@ -3860,7 +3858,7 @@ expand_builtin_strnlen (tree exp, rtx target, machine_mode target_mode) a target constant. */ static rtx -builtin_memcpy_read_str (void *data, HOST_WIDE_INT offset, +builtin_memcpy_read_str (void *data, void *, HOST_WIDE_INT offset, scalar_int_mode mode) { /* The REPresentation pointed to by DATA need not be a nul-terminated @@ -6373,7 +6371,7 @@ expand_builtin_stpncpy (tree exp, rtx) constant. */ rtx -builtin_strncpy_read_str (void *data, HOST_WIDE_INT offset, +builtin_strncpy_read_str (void *data, void *, HOST_WIDE_INT offset, scalar_int_mode mode) { const char *str = (const char *) data; @@ -6584,12 +6582,22 @@ expand_builtin_strncpy (tree exp, rtx target) /* Callback routine for store_by_pieces. Read GET_MODE_BITSIZE (MODE) bytes from constant string DATA + OFFSET and return it as target - constant. */ + constant. If PREV isn't nullptr, it has the RTL info from the + previous iteration. */ rtx -builtin_memset_read_str (void *data, HOST_WIDE_INT offset ATTRIBUTE_UNUSED, +builtin_memset_read_str (void *data, void *prevp, + HOST_WIDE_INT offset ATTRIBUTE_UNUSED, scalar_int_mode mode) { + by_pieces_prev *prev = (by_pieces_prev *) prevp; + if (prev != nullptr && prev->data != nullptr) + { + /* Use the previous data in the same mode. */ + if (prev->mode == mode) + return prev->data; + } + const char *c = (const char *) data; char *p = XALLOCAVEC (char, GET_MODE_SIZE (mode)); @@ -6601,16 +6609,28 @@ builtin_memset_read_str (void *data, HOST_WIDE_INT offset ATTRIBUTE_UNUSED, /* Callback routine for store_by_pieces. Return the RTL of a register containing GET_MODE_SIZE (MODE) consecutive copies of the unsigned char value given in the RTL register data. For example, if mode is - 4 bytes wide, return the RTL for 0x01010101*data. */ + 4 bytes wide, return the RTL for 0x01010101*data. If PREV isn't + nullptr, it has the RTL info from the previous iteration. */ static rtx -builtin_memset_gen_str (void *data, HOST_WIDE_INT offset ATTRIBUTE_UNUSED, +builtin_memset_gen_str (void *data, void *prevp, + HOST_WIDE_INT offset ATTRIBUTE_UNUSED, scalar_int_mode mode) { rtx target, coeff; size_t size; char *p; + by_pieces_prev *prev = (by_pieces_prev *) prevp; + if (prev != nullptr && prev->data != nullptr) + { + /* Use the previous data in the same mode. 
*/ + if (prev->mode == mode) + return prev->data; + + return simplify_gen_subreg (mode, prev->data, prev->mode, 0); + } + size = GET_MODE_SIZE (mode); if (size == 1) return (rtx) data; -- cgit v1.1 From 86c77c52f7b812adccf9620860f7c392f9a16cfc Mon Sep 17 00:00:00 2001 From: "H.J. Lu" Date: Thu, 29 Apr 2021 11:12:09 -0700 Subject: Don't use nullptr return from simplify_gen_subreg Check nullptr return from simplify_gen_subreg. Don't use it if it is nullptr. PR middle-end/90773 * builtins.c (builtin_memset_gen_str): Don't use return from simplify_gen_subreg. --- gcc/builtins.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'gcc/builtins.c') diff --git a/gcc/builtins.c b/gcc/builtins.c index 2d6bf4a..4613aec 100644 --- a/gcc/builtins.c +++ b/gcc/builtins.c @@ -6628,7 +6628,9 @@ builtin_memset_gen_str (void *data, void *prevp, if (prev->mode == mode) return prev->data; - return simplify_gen_subreg (mode, prev->data, prev->mode, 0); + target = simplify_gen_subreg (mode, prev->data, prev->mode, 0); + if (target != nullptr) + return target; } size = GET_MODE_SIZE (mode); -- cgit v1.1 From da9e6e63d1ae22e530ec7baf59f6ed028bf05776 Mon Sep 17 00:00:00 2001 From: Alexandre Oliva Date: Mon, 3 May 2021 22:48:47 -0300 Subject: introduce try store by multiple pieces The ldist pass turns even very short loops into memset calls. E.g., the TFmode emulation calls end with a loop of up to 3 iterations, to zero out trailing words, and the loop distribution pass turns them into calls of the memset builtin. Though short constant-length clearing memsets are usually dealt with efficiently, for non-constant-length ones, the options are setmemM, or a function calls. RISC-V doesn't have any setmemM pattern, so the loops above end up "optimized" into memset calls, incurring not only the overhead of an explicit call, but also discarding the information the compiler has about the alignment of the destination, and that the length is a multiple of the word alignment. This patch handles variable lengths with multiple conditional power-of-2-constant-sized stores-by-pieces, so as to reduce the overhead of length compares. It also changes the last copy-prop pass into ccp, so that pointer alignment and length's nonzero bits are detected and made available for the expander, even for ldist-introduced SSA_NAMEs. for gcc/ChangeLog * builtins.c (try_store_by_multiple_pieces): New. (expand_builtin_memset_args): Use it. If target_char_cast fails, proceed as for non-constant val. Pass len's ctz to... * expr.c (clear_storage_hints): ... this. Try store by multiple pieces after setmem. (clear_storage): Adjust. * expr.h (clear_storage_hints): Likewise. (try_store_by_multiple_pieces): Declare. * passes.def: Replace the last copy_prop with ccp. --- gcc/builtins.c | 182 ++++++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 175 insertions(+), 7 deletions(-) (limited to 'gcc/builtins.c') diff --git a/gcc/builtins.c b/gcc/builtins.c index 4613aec..b047128 100644 --- a/gcc/builtins.c +++ b/gcc/builtins.c @@ -6667,6 +6667,166 @@ expand_builtin_memset (tree exp, rtx target, machine_mode mode) return expand_builtin_memset_args (dest, val, len, target, mode, exp); } +/* Try to store VAL (or, if NULL_RTX, VALC) in LEN bytes starting at TO. + Return TRUE if successful, FALSE otherwise. TO is assumed to be + aligned at an ALIGN-bits boundary. 
LEN must be a multiple of + 1<<CTZ_LEN between MIN_LEN and MAX_LEN. + + The strategy is to issue one store_by_pieces for each power of two, + from most to least significant, guarded by a test on whether there + are at least that many bytes left to copy in LEN. + + ??? Should we skip some powers of two in favor of loops? Maybe start + at the max of TO/LEN/word alignment, at least when optimizing for + size, instead of ensuring O(log len) dynamic compares? */ + +bool +try_store_by_multiple_pieces (rtx to, rtx len, unsigned int ctz_len, + unsigned HOST_WIDE_INT min_len, + unsigned HOST_WIDE_INT max_len, + rtx val, char valc, unsigned int align) +{ + int max_bits = floor_log2 (max_len); + int min_bits = floor_log2 (min_len); + int sctz_len = ctz_len; + + gcc_checking_assert (sctz_len >= 0); + + if (val) + valc = 1; + + /* Bits more significant than TST_BITS are part of the shared prefix + in the binary representation of both min_len and max_len. Since + they're identical, we don't need to test them in the loop. */ + int tst_bits = (max_bits != min_bits ? max_bits + : floor_log2 (max_len ^ min_len)); + + /* Check whether it's profitable to start by storing a fixed BLKSIZE + bytes, to lower max_bits. In the unlikely case of a constant LEN + (implied by identical MAX_LEN and MIN_LEN), we want to issue a + single store_by_pieces, but otherwise, select the minimum multiple + of the ALIGN (in bytes) and of the MCD of the possible LENs, that + brings MAX_LEN below TST_BITS, if that's lower than min_len. */ + unsigned HOST_WIDE_INT blksize; + if (max_len > min_len) + { + unsigned HOST_WIDE_INT alrng = MAX (HOST_WIDE_INT_1U << ctz_len, + align / BITS_PER_UNIT); + blksize = max_len - (HOST_WIDE_INT_1U << tst_bits) + alrng; + blksize &= ~(alrng - 1); + } + else if (max_len == min_len) + blksize = max_len; + else + gcc_unreachable (); + if (min_len >= blksize) + { + min_len -= blksize; + min_bits = floor_log2 (min_len); + max_len -= blksize; + max_bits = floor_log2 (max_len); + + tst_bits = (max_bits != min_bits ? max_bits + : floor_log2 (max_len ^ min_len)); + } + else + blksize = 0; + + /* Check that we can use store by pieces for the maximum store count + we may issue (initial fixed-size block, plus conditional + power-of-two-sized from max_bits to ctz_len. */ + unsigned HOST_WIDE_INT xlenest = blksize; + if (max_bits >= 0) + xlenest += ((HOST_WIDE_INT_1U << max_bits) * 2 + - (HOST_WIDE_INT_1U << ctz_len)); + if (!can_store_by_pieces (xlenest, builtin_memset_read_str, + &valc, align, true)) + return false; + + rtx (*constfun) (void *, HOST_WIDE_INT, scalar_int_mode); + void *constfundata; + if (val) + { + constfun = builtin_memset_gen_str; + constfundata = val = force_reg (TYPE_MODE (unsigned_char_type_node), + val); + } + else + { + constfun = builtin_memset_read_str; + constfundata = &valc; + } + + rtx ptr = copy_addr_to_reg (convert_to_mode (ptr_mode, XEXP (to, 0), 0)); + rtx rem = copy_to_mode_reg (ptr_mode, convert_to_mode (ptr_mode, len, 0)); + to = replace_equiv_address (to, ptr); + set_mem_align (to, align); + + if (blksize) + { + to = store_by_pieces (to, blksize, + constfun, constfundata, + align, true, + max_len != 0 ? RETURN_END : RETURN_BEGIN); + if (max_len == 0) + return true; + + /* Adjust PTR, TO and REM. Since TO's address is likely + PTR+offset, we have to replace it. */ + emit_move_insn (ptr, XEXP (to, 0)); + to = replace_equiv_address (to, ptr); + emit_move_insn (rem, plus_constant (ptr_mode, rem, -blksize)); + } + + /* Iterate over power-of-two block sizes from the maximum length to + the least significant bit possibly set in the length. */ + for (int i = max_bits; i >= sctz_len; i--) + { + rtx_code_label *label = NULL; + blksize = HOST_WIDE_INT_1U << i; + + /* If we're past the bits shared between min_ and max_len, expand + a test on the dynamic length, comparing it with the + BLKSIZE. */ + if (i <= tst_bits) + { + label = gen_label_rtx (); + emit_cmp_and_jump_insns (rem, GEN_INT (blksize), LT, NULL, + ptr_mode, 1, label, + profile_probability::even ()); + } + /* If we are at a bit that is in the prefix shared by min_ and + max_len, skip this BLKSIZE if the bit is clear. */ + else if ((max_len & blksize) == 0) + continue; + + /* Issue a store of BLKSIZE bytes. 
*/ + to = store_by_pieces (to, blksize, + constfun, constfundata, + align, true, + i != sctz_len ? RETURN_END : RETURN_BEGIN); + + /* Adjust REM and PTR, unless this is the last iteration. */ + if (i != sctz_len) + { + emit_move_insn (ptr, XEXP (to, 0)); + to = replace_equiv_address (to, ptr); + emit_move_insn (rem, plus_constant (ptr_mode, rem, -blksize)); + } + + if (label) + { + emit_label (label); + + /* Given conditional stores, the offset can no longer be + known, so clear it. */ + clear_mem_offset (to); + } + } + + return true; +} + /* Helper function to do the actual work for expand_builtin_memset. The arguments to the builtin_memset call DEST, VAL, and LEN are broken out so that this can also be called without constructing an actual CALL_EXPR. @@ -6721,7 +6881,8 @@ expand_builtin_memset_args (tree dest, tree val, tree len, dest_mem = get_memory_rtx (dest, len); val_mode = TYPE_MODE (unsigned_char_type_node); - if (TREE_CODE (val) != INTEGER_CST) + if (TREE_CODE (val) != INTEGER_CST + || target_char_cast (val, &c)) { rtx val_rtx; @@ -6745,7 +6906,12 @@ expand_builtin_memset_args (tree dest, tree val, tree len, else if (!set_storage_via_setmem (dest_mem, len_rtx, val_rtx, dest_align, expected_align, expected_size, min_size, max_size, - probable_max_size)) + probable_max_size) + && !try_store_by_multiple_pieces (dest_mem, len_rtx, + tree_ctz (len), + min_size, max_size, + val_rtx, 0, + dest_align)) goto do_libcall; dest_mem = force_operand (XEXP (dest_mem, 0), NULL_RTX); @@ -6753,9 +6919,6 @@ expand_builtin_memset_args (tree dest, tree val, tree len, return dest_mem; } - if (target_char_cast (val, &c)) - goto do_libcall; - if (c) { if (tree_fits_uhwi_p (len) @@ -6769,7 +6932,12 @@ expand_builtin_memset_args (tree dest, tree val, tree len, gen_int_mode (c, val_mode), dest_align, expected_align, expected_size, min_size, max_size, - probable_max_size)) + probable_max_size) + && !try_store_by_multiple_pieces (dest_mem, len_rtx, + tree_ctz (len), + min_size, max_size, + NULL_RTX, c, + dest_align)) goto do_libcall; dest_mem = force_operand (XEXP (dest_mem, 0), NULL_RTX); @@ -6783,7 +6951,7 @@ expand_builtin_memset_args (tree dest, tree val, tree len, ? BLOCK_OP_TAILCALL : BLOCK_OP_NORMAL, expected_align, expected_size, min_size, max_size, - probable_max_size); + probable_max_size, tree_ctz (len)); if (dest_addr == 0) { -- cgit v1.1 From 5fbe6a8e73b52c6ebc28b9111456226c1cda6472 Mon Sep 17 00:00:00 2001 From: Prathamesh Kulkarni Date: Tue, 4 May 2021 11:11:18 +0530 Subject: Fix typo in builtins.c. gcc/ChangeLog: 2021-05-04 Prathamesh Kulkarni * builtins.c (try_store_by_multiple_pieces): Fix constfun's prototype. 
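The one-line fix below is clearer with a reduced sketch (simplified types; rtxish, read_str and sketch are invented for illustration and are not GCC declarations). After the op_by_pieces change earlier in this series, by-pieces callbacks take an extra void * carrying the previous iteration's RTL, so the local constfun pointer in try_store_by_multiple_pieces, still declared with the old shape, no longer matched the callbacks assigned to it:

    typedef int rtxish;   /* stand-in for rtx in this sketch */

    /* Post-change callback shape: note the second void * parameter.  */
    static rtxish
    read_str (void *data, void *prev, long offset)
    {
      return 0;
    }

    void
    sketch (void *data)
    {
      rtxish (*constfun) (void *, void *, long) = read_str;  /* fixed: OK */
      constfun (data, 0, 0);
      /* rtxish (*stale) (void *, long) = read_str;
         -- what the old declaration amounted to; ill-formed because
         the prototypes differ.  */
    }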
--- gcc/builtins.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'gcc/builtins.c') diff --git a/gcc/builtins.c b/gcc/builtins.c index b047128..14cfa57 100644 --- a/gcc/builtins.c +++ b/gcc/builtins.c @@ -6743,7 +6743,7 @@ try_store_by_multiple_pieces (rtx to, rtx len, unsigned int ctz_len, &valc, align, true)) return false; - rtx (*constfun) (void *, HOST_WIDE_INT, scalar_int_mode); + rtx (*constfun) (void *, void *, HOST_WIDE_INT, scalar_int_mode); void *constfundata; if (val) { -- cgit v1.1 From f418bc3cd173bc4e679469928d4d96ffcc05fc7e Mon Sep 17 00:00:00 2001 From: Eric Botcazou Date: Tue, 4 May 2021 12:40:42 +0200 Subject: Fix libiberty link failures in LTO mode for MinGW The test for the presence of variables (really symbols) does not work when you add -Ox -flto to CFLAGS: for v in $vars; do AC_MSG_CHECKING([for $v]) AC_CACHE_VAL(libiberty_cv_var_$v, [AC_LINK_IFELSE([AC_LANG_PROGRAM([[int *p;]],[[extern int $v []; p = $v;]])], [eval "libiberty_cv_var_$v=yes"], [eval "libiberty_cv_var_$v=no"])]) if eval "test \"`echo '$libiberty_cv_var_'$v`\" = yes"; then AC_MSG_RESULT(yes) AC_DEFINE_UNQUOTED($n) else AC_MSG_RESULT(no) fi done because the assignment to 'p' is optimized away by LTO. This is visible on MinGW platforms in the form of a link failure for sys_siglist. There is another link failures for stpcpy: the symbol is both referenced by libiberty's pex-win32.c and provided by libiberty's stpcpy.c, so it needs to have a linkage to be resolved in LTO mode. libiberty/ * configure.ac: Make test for variables more robust. * configure: Regenerate. gcc/ * builtins.c (builtin_with_linkage_p): Return true for stp[n]cpy. * symtab.c (symtab_node::output_to_lto_symbol_table_p): Tidy up. --- gcc/builtins.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'gcc/builtins.c') diff --git a/gcc/builtins.c b/gcc/builtins.c index 14cfa57..0db4090 100644 --- a/gcc/builtins.c +++ b/gcc/builtins.c @@ -14480,8 +14480,8 @@ target_char_cst_p (tree t, char *p) } /* Return true if the builtin DECL is implemented in a standard library. - Otherwise returns false which doesn't guarantee it is not (thus the list of - handled builtins below may be incomplete). */ + Otherwise return false which doesn't guarantee it is not (thus the list + of handled builtins below may be incomplete). */ bool builtin_with_linkage_p (tree decl) @@ -14560,6 +14560,14 @@ builtin_with_linkage_p (tree decl) CASE_FLT_FN (BUILT_IN_TRUNC): CASE_FLT_FN_FLOATN_NX (BUILT_IN_TRUNC): return true; + + case BUILT_IN_STPCPY: + case BUILT_IN_STPNCPY: + /* stpcpy is both referenced in libiberty's pex-win32.c and provided + by libiberty's stpcpy.c for MinGW targets so we need to return true + in order to be able to build libiberty in LTO mode for them. */ + return true; + default: break; } -- cgit v1.1 From 158cdc7bd97d7ccca5bc8adaaf80fe51eacdc038 Mon Sep 17 00:00:00 2001 From: Martin Sebor Date: Tue, 4 May 2021 13:46:37 -0600 Subject: PR middle-end/100307 - spurious -Wplacement-new with negative pointer offset gcc/ChangeLog: PR middle-end/100307 * builtins.c (compute_objsize_r): Clear base0 for pointers. gcc/testsuite/ChangeLog: PR middle-end/100307 * g++.dg/warn/Wplacement-new-size-9.C: New test. * gcc.dg/tree-ssa/builtin-sprintf-warn-26.c: New test. 
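A hypothetical reduction of the false positive being fixed (not the testcase added by the patch; f is an invented name): when all the warning machinery sees is a bare pointer, the pointer's offset within its underlying object is unknown, so a negative adjustment cannot be assumed to land out of bounds:

    #include <new>

    void *
    f (char *p)
    {
      /* P may point well past the start of its buffer, so P - 4 can be
         perfectly valid; -Wplacement-new used to fire here anyway.  */
      return ::new (p - 4) int;
    }

Clearing BASE0 in compute_objsize_r records exactly that: offsets measured from a bare pointer are not zero-based.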
--- gcc/builtins.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'gcc/builtins.c') diff --git a/gcc/builtins.c b/gcc/builtins.c index 0db4090..6f67eb7 100644 --- a/gcc/builtins.c +++ b/gcc/builtins.c @@ -5447,8 +5447,10 @@ compute_objsize_r (tree ptr, int ostype, access_ref *pref, if (!addr && POINTER_TYPE_P (TREE_TYPE (ptr))) { /* Set the maximum size if the reference is to the pointer - itself (as opposed to what it points to). */ + itself (as opposed to what it points to), and clear + BASE0 since the offset isn't necessarily zero-based. */ pref->set_max_size_range (); + pref->base0 = false; return true; } -- cgit v1.1 From 07d7d37d1a33efb04f1262e56f4b82d6e1089e75 Mon Sep 17 00:00:00 2001 From: Andrew Stubbs Date: Fri, 7 May 2021 15:31:05 +0100 Subject: Ensure emit_move_insn operands are valid Some architectures are fine with PLUS in move instructions, but others are not (amdgcn is the motivating example). 2021-05-07 Jakub Jelinek Andrew Stubbs gcc/ChangeLog: PR target/100418 * builtins.c (try_store_by_multiple_pieces): Use force_operand for emit_move_insn operands. --- gcc/builtins.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'gcc/builtins.c') diff --git a/gcc/builtins.c b/gcc/builtins.c index 6f67eb7..bc40bad 100644 --- a/gcc/builtins.c +++ b/gcc/builtins.c @@ -6775,9 +6775,10 @@ try_store_by_multiple_pieces (rtx to, rtx len, unsigned int ctz_len, /* Adjust PTR, TO and REM. Since TO's address is likely PTR+offset, we have to replace it. */ - emit_move_insn (ptr, XEXP (to, 0)); + emit_move_insn (ptr, force_operand (XEXP (to, 0), NULL_RTX)); to = replace_equiv_address (to, ptr); - emit_move_insn (rem, plus_constant (ptr_mode, rem, -blksize)); + rtx rem_minus_blksize = plus_constant (ptr_mode, rem, -blksize); + emit_move_insn (rem, force_operand (rem_minus_blksize, NULL_RTX)); } /* Iterate over power-of-two block sizes from the maximum length to @@ -6811,9 +6812,10 @@ try_store_by_multiple_pieces (rtx to, rtx len, unsigned int ctz_len, /* Adjust REM and PTR, unless this is the last iteration. */ if (i != sctz_len) { - emit_move_insn (ptr, XEXP (to, 0)); + emit_move_insn (ptr, force_operand (XEXP (to, 0), NULL_RTX)); to = replace_equiv_address (to, ptr); - emit_move_insn (rem, plus_constant (ptr_mode, rem, -blksize)); + rtx rem_minus_blksize = plus_constant (ptr_mode, rem, -blksize); + emit_move_insn (rem, force_operand (rem_minus_blksize, NULL_RTX)); } if (label) -- cgit v1.1 From 6ba3079dce89d9b63bf5dbd5e320ea2bf96f196b Mon Sep 17 00:00:00 2001 From: Martin Liska Date: Wed, 17 Mar 2021 16:36:44 +0100 Subject: Come up with startswith function. gcc/ada/ChangeLog: * gcc-interface/utils.c (def_builtin_1): Use startswith function instead of strncmp. gcc/analyzer/ChangeLog: * sm-file.cc (is_file_using_fn_p): Use startswith function instead of strncmp. gcc/ChangeLog: * builtins.c (is_builtin_name): Use startswith function instead of strncmp. * collect2.c (main): Likewise. (has_lto_section): Likewise. (scan_libraries): Likewise. * coverage.c (coverage_checksum_string): Likewise. (coverage_init): Likewise. * dwarf2out.c (is_cxx): Likewise. (gen_compile_unit_die): Likewise. * gcc-ar.c (main): Likewise. * gcc.c (init_spec): Likewise. (read_specs): Likewise. (execute): Likewise. (check_live_switch): Likewise. * genattrtab.c (write_attr_case): Likewise. (IS_ATTR_GROUP): Likewise. * gencfn-macros.c (main): Likewise. * gengtype.c (type_for_name): Likewise. (gen_rtx_next): Likewise. (get_file_langdir): Likewise. (write_local): Likewise. 
* genmatch.c (get_operator): Likewise. (get_operand_type): Likewise. (expr::gen_transform): Likewise. * genoutput.c (validate_optab_operands): Likewise. * incpath.c (add_sysroot_to_chain): Likewise. * langhooks.c (lang_GNU_C): Likewise. (lang_GNU_CXX): Likewise. (lang_GNU_Fortran): Likewise. (lang_GNU_OBJC): Likewise. * lto-wrapper.c (run_gcc): Likewise. * omp-general.c (omp_max_simt_vf): Likewise. * omp-low.c (omp_runtime_api_call): Likewise. * opts-common.c (parse_options_from_collect_gcc_options): Likewise. * read-rtl-function.c (function_reader::read_rtx_operand_r): Likewise. * real.c (real_from_string): Likewise. * selftest.c (assert_str_startswith): Likewise. * timevar.c (timer::validate_phases): Likewise. * tree.c (get_file_function_name): Likewise. * ubsan.c (ubsan_use_new_style_p): Likewise. * varasm.c (default_function_rodata_section): Likewise. (incorporeal_function_p): Likewise. (default_section_type_flags): Likewise. * system.h (startswith): Define startswith. gcc/c-family/ChangeLog: * c-ada-spec.c (print_destructor): Use startswith function instead of strncmp. (dump_ada_declaration): Likewise. * c-common.c (disable_builtin_function): Likewise. (def_builtin_1): Likewise. * c-format.c (check_tokens): Likewise. (check_plain): Likewise. (convert_format_name_to_system_name): Likewise. gcc/c/ChangeLog: * c-aux-info.c (affix_data_type): Use startswith function instead of strncmp. * c-typeck.c (build_function_call_vec): Likewise. * gimple-parser.c (c_parser_gimple_parse_bb_spec): Likewise. gcc/cp/ChangeLog: * decl.c (duplicate_decls): Use startswith function instead of strncmp. (cxx_builtin_function): Likewise. (omp_declare_variant_finalize_one): Likewise. (grokfndecl): Likewise. * error.c (dump_decl_name): Likewise. * mangle.c (find_decomp_unqualified_name): Likewise. (write_guarded_var_name): Likewise. (decl_tls_wrapper_p): Likewise. * parser.c (cp_parser_simple_type_specifier): Likewise. (cp_parser_tx_qualifier_opt): Likewise. * pt.c (template_parm_object_p): Likewise. (dguide_name_p): Likewise. gcc/d/ChangeLog: * d-builtins.cc (do_build_builtin_fn): Use startswith function instead of strncmp. * dmd/dinterpret.c (evaluateIfBuiltin): Likewise. * dmd/dmangle.c: Likewise. * dmd/hdrgen.c: Likewise. * dmd/identifier.c (Identifier::toHChars2): Likewise. gcc/fortran/ChangeLog: * decl.c (variable_decl): Use startswith function instead of strncmp. (gfc_match_end): Likewise. * gfortran.h (gfc_str_startswith): Likewise. * module.c (load_omp_udrs): Likewise. (read_module): Likewise. * options.c (gfc_handle_runtime_check_option): Likewise. * primary.c (match_arg_list_function): Likewise. * trans-decl.c (gfc_get_symbol_decl): Likewise. * trans-expr.c (gfc_conv_procedure_call): Likewise. * trans-intrinsic.c (gfc_conv_ieee_arithmetic_function): Likewise. gcc/go/ChangeLog: * gofrontend/runtime.cc (Runtime::name_to_code): Use startswith function instead of strncmp. gcc/objc/ChangeLog: * objc-act.c (objc_string_ref_type_p): Use startswith function instead of strncmp. * objc-encoding.c (encode_type): Likewise. * objc-next-runtime-abi-02.c (has_load_impl): Likewise. 
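For reference, the helper this series converts everything to is defined in gcc/system.h essentially as follows (reproduced from the patch series; line breaks here are approximate):

    /* Return true if STR string starts with PREFIX.  */
    static inline bool
    startswith (const char *str, const char *prefix)
    {
      return strncmp (str, prefix, strlen (prefix)) == 0;
    }

It removes the hard-coded prefix lengths that every strncmp call had to keep in sync with its string literal, as the builtins.c hunk below illustrates.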
--- gcc/builtins.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) (limited to 'gcc/builtins.c') diff --git a/gcc/builtins.c b/gcc/builtins.c index bc40bad..2f0efae 100644 --- a/gcc/builtins.c +++ b/gcc/builtins.c @@ -738,13 +738,9 @@ pointer_query::flush_cache () static bool is_builtin_name (const char *name) { - if (strncmp (name, "__builtin_", 10) == 0) - return true; - if (strncmp (name, "__sync_", 7) == 0) - return true; - if (strncmp (name, "__atomic_", 9) == 0) - return true; - return false; + return (startswith (name, "__builtin_") + || startswith (name, "__sync_") + || startswith (name, "__atomic_")); } /* Return true if NODE should be considered for inline expansion regardless -- cgit v1.1 From 5380e3c137b2bb63acd789cd09d01a4edc0a01c3 Mon Sep 17 00:00:00 2001 From: Martin Sebor Date: Thu, 13 May 2021 16:20:45 -0600 Subject: PR middle-end/100574 - ICE in size_remaining, at builtins.c gcc/ChangeLog: PR middle-end/100574 * builtins.c (access_ref::get_ref): Improve detection of PHIs with all null arguments. --- gcc/builtins.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'gcc/builtins.c') diff --git a/gcc/builtins.c b/gcc/builtins.c index 2f0efae..e1b2848 100644 --- a/gcc/builtins.c +++ b/gcc/builtins.c @@ -362,15 +362,6 @@ access_ref::get_ref (vec<access_ref> *all_refs, same_ref.offrng[1] = phi_arg_ref.offrng[1]; } - if (phi_ref.sizrng[0] < 0) - { - /* Fail if none of the PHI's arguments resulted in updating PHI_REF - (perhaps because they have all been already visited by prior - recursive calls). */ - psnlim->leave_phi (ref); - return NULL_TREE; - } - if (!same_ref.ref && same_ref.offrng[0] != 0) /* Clear BASE0 if not all the arguments refer to the same object and if not all their offsets are zero-based. This allows the final @@ -390,6 +381,15 @@ access_ref::get_ref (vec<access_ref> *all_refs, phi_ref.parmarray = parmarray; } + if (phi_ref.sizrng[0] < 0) + { + /* Fail if none of the PHI's arguments resulted in updating PHI_REF + (perhaps because they have all been already visited by prior + recursive calls). */ + psnlim->leave_phi (ref); + return NULL_TREE; + } + /* Avoid changing *THIS. */ if (pref && pref != this) *pref = phi_ref; -- cgit v1.1 From e6683450f4a26dae7774be735a3429f48aee9565 Mon Sep 17 00:00:00 2001 From: Jakub Jelinek Date: Wed, 19 May 2021 12:05:30 +0200 Subject: builtins: Fix ICE with unprototyped builtin call [PR100576] MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For unprototyped builtins the checking we perform is only about whether the used argument is integral, pointer etc., not the exact precision. We emit a warning about the problem though: pr100576.c: In function ‘foo’: pr100576.c:9:11: warning: implicit declaration of function ‘memcmp’ [-Wimplicit-function-declaration] 9 | int n = memcmp (p, v, b); | ^~~~~~ pr100576.c:1:1: note: include ‘<string.h>’ or provide a declaration of ‘memcmp’ +++ |+#include <string.h> 1 | /* PR middle-end/100576 */ pr100576.c:9:25: warning: ‘memcmp’ argument 3 type is ‘int’ where ‘long unsigned int’ is expected in a call to built-in function declared without prototype +[-Wbuiltin-declaration-mismatch] 9 | int n = memcmp (p, v, b); | ^ It means in the testcase below where the user incorrectly called memcmp with last argument int rather than size_t, the warning stuff in builtins.c ICEs because it compares a wide_int from such a bound with another wide_int which has precision of size_t/sizetype and wide_int asserts the compared wide_ints are compatible. 
Fixed by forcing the bound to have the right type. 2021-05-19 Jakub Jelinek PR middle-end/100576 * builtins.c (check_read_access): Convert bound to size_type_node if non-NULL. * gcc.c-torture/compile/pr100576.c: New test. --- gcc/builtins.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'gcc/builtins.c') diff --git a/gcc/builtins.c b/gcc/builtins.c index e1b2848..6a2875e 100644 --- a/gcc/builtins.c +++ b/gcc/builtins.c @@ -4904,6 +4904,8 @@ check_read_access (tree exp, tree src, tree bound /* = NULL_TREE */, if (!warn_stringop_overread) return true; + if (bound && !useless_type_conversion_p (size_type_node, TREE_TYPE (bound))) + bound = fold_convert (size_type_node, bound); access_data data (exp, access_read_only, NULL_TREE, false, bound, true); compute_objsize (src, ost, &data.src); return check_access (exp, /*dstwrite=*/ NULL_TREE, /*maxread=*/ bound, -- cgit v1.1 From 2e6ad1ba532fe684633edac766c598be19ad3b59 Mon Sep 17 00:00:00 2001 From: Richard Biener Date: Wed, 19 May 2021 10:20:37 +0200 Subject: Enable more WITH_SIZE_EXPR processing This enables the alias machinery for WITH_SIZE_EXPR which can appear in call LHS and arguments. In particular this drops the NULL return from get_base_address and it adjusts get_ref_base_and_extent and friends to use the size information in WITH_SIZE_EXPR and look through it for further processing. 2021-05-19 Richard Biener * builtins.c (get_object_alignment_1): Strip outer WITH_SIZE_EXPR. * tree-dfa.c (get_ref_base_and_extent): Handle outer WITH_SIZE_EXPR for size processing and process the containing ref. * tree-ssa-alias.c (ao_ref_base_alias_set): Strip outer WITH_SIZE_EXPR. (ao_ref_base_alias_ptr_type): Likewise. (refs_may_alias_p_2): Allow WITH_SIZE_EXPR in ref->ref and handle that accordingly, stripping it for the core alias workers. * tree.c (get_base_address): Handle WITH_SIZE_EXPR by looking through it instead of returning NULL. --- gcc/builtins.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'gcc/builtins.c') diff --git a/gcc/builtins.c b/gcc/builtins.c index 6a2875e..b0c880d 100644 --- a/gcc/builtins.c +++ b/gcc/builtins.c @@ -924,6 +924,10 @@ bool get_object_alignment_1 (tree exp, unsigned int *alignp, unsigned HOST_WIDE_INT *bitposp) { + /* Strip a WITH_SIZE_EXPR, get_inner_reference doesn't know how to deal + with it. */ + if (TREE_CODE (exp) == WITH_SIZE_EXPR) + exp = TREE_OPERAND (exp, 0); return get_object_alignment_2 (exp, alignp, bitposp, false); } -- cgit v1.1 From 45f4e2b01b82c72b3a11ff4ad184d7edcf0e63d4 Mon Sep 17 00:00:00 2001 From: Aldy Hernandez Date: Wed, 19 May 2021 18:44:08 +0200 Subject: Convert remaining passes to get_range_query. This patch converts the remaining users of get_range_info and get_ptr_nonnull to the get_range_query API. No effort was made to move passes away from VR_ANTI_RANGE, or any other use of deprecated methods. This was a straight up conversion to the new API, nothing else. gcc/ChangeLog: * builtins.c (check_nul_terminated_array): Convert to get_range_query. (expand_builtin_strnlen): Same. (determine_block_size): Same. * fold-const.c (expr_not_equal_to): Same. * gimple-fold.c (size_must_be_zero_p): Same. * gimple-match-head.c: Include gimple-range.h. * gimple-pretty-print.c (dump_ssaname_info): Convert to get_range_query. * gimple-ssa-warn-restrict.c (builtin_memref::extend_offset_range): Same. * graphite-sese-to-poly.c (add_param_constraints): Same. * internal-fn.c (get_min_precision): Same. * ipa-fnsummary.c (set_switch_stmt_execution_predicate): Same. 
* ipa-prop.c (ipa_compute_jump_functions_for_edge): Same. * match.pd: Same. * tree-data-ref.c (split_constant_offset): Same. (dr_step_indicator): Same. * tree-dfa.c (get_ref_base_and_extent): Same. * tree-scalar-evolution.c (iv_can_overflow_p): Same. * tree-ssa-loop-niter.c (refine_value_range_using_guard): Same. (determine_value_range): Same. (record_nonwrapping_iv): Same. (infer_loop_bounds_from_signedness): Same. (scev_var_range_cant_overflow): Same. * tree-ssa-phiopt.c (two_value_replacement): Same. * tree-ssa-pre.c (insert_into_preds_of_block): Same. * tree-ssa-reassoc.c (optimize_range_tests_to_bit_test): Same. * tree-ssa-strlen.c (handle_builtin_stxncpy_strncat): Same. (get_range): Same. (dump_strlen_info): Same. (set_strlen_range): Same. (maybe_diag_stxncpy_trunc): Same. (get_len_or_size): Same. (handle_integral_assign): Same. * tree-ssa-structalias.c (find_what_p_points_to): Same. * tree-ssa-uninit.c (find_var_cmp_const): Same. * tree-switch-conversion.c (bit_test_cluster::emit): Same. * tree-vect-patterns.c (vect_get_range_info): Same. (vect_recog_divmod_pattern): Same. * tree-vrp.c (intersect_range_with_nonzero_bits): Same. (register_edge_assert_for_2): Same. (determine_value_range_1): Same. * tree.c (get_range_pos_neg): Same. * vr-values.c (vr_values::get_lattice_entry): Same. (vr_values::update_value_range): Same. (simplify_conversion_using_ranges): Same. --- gcc/builtins.c | 40 +++++++++++++++++++++++++++------------- 1 file changed, 27 insertions(+), 13 deletions(-) (limited to 'gcc/builtins.c') diff --git a/gcc/builtins.c b/gcc/builtins.c index b0c880d..af1fe49b 100644 --- a/gcc/builtins.c +++ b/gcc/builtins.c @@ -79,6 +79,7 @@ along with GCC; see the file COPYING3. If not see #include "tree-outof-ssa.h" #include "attr-fnspec.h" #include "demangle.h" +#include "gimple-range.h" struct target_builtins default_target_builtins; #if SWITCHABLE_TARGET @@ -1218,14 +1219,15 @@ check_nul_terminated_array (tree expr, tree src, wide_int bndrng[2]; if (bound) { - if (TREE_CODE (bound) == INTEGER_CST) - bndrng[0] = bndrng[1] = wi::to_wide (bound); - else - { - value_range_kind rng = get_range_info (bound, bndrng, bndrng + 1); - if (rng != VR_RANGE) - return true; - } + value_range r; + + get_global_range_query ()->range_of_expr (r, bound); + + if (r.kind () != VR_RANGE) + return true; + + bndrng[0] = r.lower_bound (); + bndrng[1] = r.upper_bound (); if (exact) { @@ -3831,9 +3833,12 @@ expand_builtin_strnlen (tree exp, rtx target, machine_mode target_mode) return NULL_RTX; wide_int min, max; - enum value_range_kind rng = get_range_info (bound, &min, &max); - if (rng != VR_RANGE) + value_range r; + get_global_range_query ()->range_of_expr (r, bound); + if (r.kind () != VR_RANGE) return NULL_RTX; + min = r.lower_bound (); + max = r.upper_bound (); if (!len || TREE_CODE (len) != INTEGER_CST) { @@ -3901,7 +3906,16 @@ determine_block_size (tree len, rtx len_rtx, *probable_max_size = *max_size = GET_MODE_MASK (GET_MODE (len_rtx)); if (TREE_CODE (len) == SSA_NAME) - range_type = get_range_info (len, &min, &max); + { + value_range r; + get_global_range_query ()->range_of_expr (r, len); + range_type = r.kind (); + if (range_type != VR_UNDEFINED) + { + min = wi::to_wide (r.min ()); + max = wi::to_wide (r.max ()); + } + } if (range_type == VR_RANGE) { if (wi::fits_uhwi_p (min) && *min_size < min.to_uhwi ()) @@ -4920,8 +4934,8 @@ check_read_access (tree exp, tree src, tree bound /* = NULL_TREE */, /* If STMT is a call to an allocation function, returns the constant maximum size of the object 
allocated by the call represented as sizetype. If nonnull, sets RNG1[] to the range of the size. - When nonnull, uses RVALS for range information, otherwise calls - get_range_info to get it. + When nonnull, uses RVALS for range information, otherwise gets global + range info. Returns null when STMT is not a call to a valid allocation function. */ tree -- cgit v1.1 From d9f1466f88abef7c814d02ba39a6ea5ef420aaec Mon Sep 17 00:00:00 2001 From: Martin Sebor Date: Mon, 14 Jun 2021 16:34:48 -0600 Subject: Teach compute_objsize about placement new [PR100876]. Resolves: PR c++/100876 - -Wmismatched-new-delete should understand placement new when it's not inlined gcc/ChangeLog: PR c++/100876 * builtins.c (gimple_call_return_array): Check for attribute fn spec. Handle calls to placement new. (fndecl_dealloc_argno): Avoid placement delete. gcc/testsuite/ChangeLog: PR c++/100876 * g++.dg/warn/Wmismatched-new-delete-4.C: New test. * g++.dg/warn/Wmismatched-new-delete-5.C: New test. * g++.dg/warn/Wstringop-overflow-7.C: New test. * g++.dg/warn/Wfree-nonheap-object-6.C: New test. * g++.dg/analyzer/placement-new.C: Prune out expected warning. --- gcc/builtins.c | 47 ++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 44 insertions(+), 3 deletions(-) (limited to 'gcc/builtins.c') diff --git a/gcc/builtins.c b/gcc/builtins.c index af1fe49b..75419cc 100644 --- a/gcc/builtins.c +++ b/gcc/builtins.c @@ -5159,11 +5159,42 @@ static tree gimple_call_return_array (gimple *stmt, offset_int offrng[2], range_query *rvals) { - if (!gimple_call_builtin_p (stmt, BUILT_IN_NORMAL) - || gimple_call_num_args (stmt) < 1) + { + /* Check for attribute fn spec to see if the function returns one + of its arguments. */ + attr_fnspec fnspec = gimple_call_fnspec (as_a <gcall *> (stmt)); + unsigned int argno; + if (fnspec.returns_arg (&argno)) + { + offrng[0] = offrng[1] = 0; + return gimple_call_arg (stmt, argno); + } + } + + if (gimple_call_num_args (stmt) < 1) + return NULL_TREE; tree fn = gimple_call_fndecl (stmt); + if (!gimple_call_builtin_p (stmt, BUILT_IN_NORMAL)) + { + /* See if this is a call to placement new. */ + if (!fn + || !DECL_IS_OPERATOR_NEW_P (fn) + || DECL_IS_REPLACEABLE_OPERATOR_NEW_P (fn)) + return NULL_TREE; + + tree fname = DECL_ASSEMBLER_NAME (fn); + if (!id_equal (fname, "_ZnwmPv") // ordinary form + && !id_equal (fname, "_ZnamPv")) // array form + return NULL_TREE; + + if (gimple_call_num_args (stmt) != 2) + return NULL_TREE; + + offrng[0] = offrng[1] = 0; + return gimple_call_arg (stmt, 1); + } + switch (DECL_FUNCTION_CODE (fn)) { case BUILT_IN_MEMCPY: @@ -13285,7 +13316,17 @@ fndecl_dealloc_argno (tree fndecl) { /* A call to operator delete isn't recognized as one to a built-in. */ if (DECL_IS_OPERATOR_DELETE_P (fndecl)) - return 0; + { + if (DECL_IS_REPLACEABLE_OPERATOR (fndecl)) + return 0; + + /* Avoid placement delete that's not been inlined. */ + tree fname = DECL_ASSEMBLER_NAME (fndecl); + if (id_equal (fname, "_ZdlPvS_") // ordinary form + || id_equal (fname, "_ZdaPvS_")) // array form + return UINT_MAX; + return 0; + } /* TODO: Handle user-defined functions with attribute malloc? Handle known non-built-ins like fopen? */ -- cgit v1.1 From 71790f398e119c7fed867b0cfce60a7500629dff Mon Sep 17 00:00:00 2001 From: Martin Sebor Date: Tue, 15 Jun 2021 12:42:06 -0600 Subject: Consider size_t mangling as unsigned int and long [PR100876]. 
gcc/ChangeLog: PR middle-end/100876 * builtins.c: (gimple_call_return_array): Account for size_t mangling as either unsigned int or unsigned long --- gcc/builtins.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'gcc/builtins.c') diff --git a/gcc/builtins.c b/gcc/builtins.c index 75419cc..855ad1e 100644 --- a/gcc/builtins.c +++ b/gcc/builtins.c @@ -5183,8 +5183,12 @@ gimple_call_return_array (gimple *stmt, offset_int offrng[2], || DECL_IS_REPLACEABLE_OPERATOR_NEW_P (fn)) return NULL_TREE; + /* Check the mangling, keeping in mind that operator new takes + a size_t which could be unsigned int or unsigned long. */ tree fname = DECL_ASSEMBLER_NAME (fn); - if (!id_equal (fname, "_ZnwmPv") // ordinary form + if (!id_equal (fname, "_ZnwjPv") // ordinary form + && !id_equal (fname, "_ZnwmPv") // ordinary form + && !id_equal (fname, "_ZnajPv") // array form && !id_equal (fname, "_ZnamPv")) // array form return NULL_TREE; -- cgit v1.1 From e9e2bad7251477db92ab9ebcdc010f9282dd9890 Mon Sep 17 00:00:00 2001 From: Martin Sebor Date: Thu, 24 Jun 2021 19:22:06 -0600 Subject: middle-end: add support for per-location warning groups. gcc/ChangeLog: * builtins.c (warn_string_no_nul): Replace uses of TREE_NO_WARNING, gimple_no_warning_p and gimple_set_no_warning with warning_suppressed_p, and suppress_warning. (c_strlen): Same. (maybe_warn_for_bound): Same. (warn_for_access): Same. (check_access): Same. (expand_builtin_strncmp): Same. (fold_builtin_varargs): Same. * calls.c (maybe_warn_nonstring_arg): Same. (maybe_warn_rdwr_sizes): Same. * cfgexpand.c (expand_call_stmt): Same. * cgraphunit.c (check_global_declaration): Same. * fold-const.c (fold_undefer_overflow_warnings): Same. (fold_truth_not_expr): Same. (fold_unary_loc): Same. (fold_checksum_tree): Same. * gimple-array-bounds.cc (array_bounds_checker::check_array_ref): Same. (array_bounds_checker::check_mem_ref): Same. (array_bounds_checker::check_addr_expr): Same. (array_bounds_checker::check_array_bounds): Same. * gimple-expr.c (copy_var_decl): Same. * gimple-fold.c (gimple_fold_builtin_strcpy): Same. (gimple_fold_builtin_strncat): Same. (gimple_fold_builtin_stxcpy_chk): Same. (gimple_fold_builtin_stpcpy): Same. (gimple_fold_builtin_sprintf): Same. (fold_stmt_1): Same. * gimple-ssa-isolate-paths.c (diag_returned_locals): Same. * gimple-ssa-nonnull-compare.c (do_warn_nonnull_compare): Same. * gimple-ssa-sprintf.c (handle_printf_call): Same. * gimple-ssa-store-merging.c (imm_store_chain_info::output_merged_store): Same. * gimple-ssa-warn-restrict.c (maybe_diag_overlap): Same. * gimple-ssa-warn-restrict.h: Adjust declarations. (maybe_diag_access_bounds): Replace uses of TREE_NO_WARNING, gimple_no_warning_p and gimple_set_no_warning with warning_suppressed_p, and suppress_warning. (check_call): Same. (check_bounds_or_overlap): Same. * gimple.c (gimple_build_call_from_tree): Same. * gimplify.c (gimplify_return_expr): Same. (gimplify_cond_expr): Same. (gimplify_modify_expr_complex_part): Same. (gimplify_modify_expr): Same. (gimple_push_cleanup): Same. (gimplify_expr): Same. * omp-expand.c (expand_omp_for_generic): Same. (expand_omp_taskloop_for_outer): Same. * omp-low.c (lower_rec_input_clauses): Same. (lower_lastprivate_clauses): Same. (lower_send_clauses): Same. (lower_omp_target): Same. * tree-cfg.c (pass_warn_function_return::execute): Same. * tree-complex.c (create_one_component_var): Same. * tree-inline.c (remap_gimple_op_r): Same. (copy_tree_body_r): Same. (declare_return_variable): Same. (expand_call_inline): Same. 
* tree-nested.c (lookup_field_for_decl): Same. * tree-sra.c (create_access_replacement): Same. (generate_subtree_copies): Same. * tree-ssa-ccp.c (pass_post_ipa_warn::execute): Same. * tree-ssa-forwprop.c (combine_cond_expr_cond): Same. * tree-ssa-loop-ch.c (ch_base::copy_headers): Same. * tree-ssa-loop-im.c (execute_sm): Same. * tree-ssa-phiopt.c (cond_store_replacement): Same. * tree-ssa-strlen.c (maybe_warn_overflow): Same. (handle_builtin_strcpy): Same. (maybe_diag_stxncpy_trunc): Same. (handle_builtin_stxncpy_strncat): Same. (handle_builtin_strcat): Same. * tree-ssa-uninit.c (get_no_uninit_warning): Same. (set_no_uninit_warning): Same. (uninit_undefined_value_p): Same. (warn_uninit): Same. (maybe_warn_operand): Same. * tree-vrp.c (compare_values_warnv): Same. * vr-values.c (vr_values::extract_range_for_var_from_comparison_expr): Same. (test_for_singularity): Same. * gimple.h (warning_suppressed_p): New function. (suppress_warning): Same. (copy_no_warning): Same. (gimple_set_block): Call gimple_set_location. (gimple_set_location): Call copy_warning. --- gcc/builtins.c | 90 +++++++++++++++++++++++++++++++--------------------------- 1 file changed, 48 insertions(+), 42 deletions(-) (limited to 'gcc/builtins.c') diff --git a/gcc/builtins.c b/gcc/builtins.c index 855ad1e..e5e3938 100644 --- a/gcc/builtins.c +++ b/gcc/builtins.c @@ -1095,7 +1095,9 @@ warn_string_no_nul (location_t loc, tree expr, const char *fname, bool exact /* = false */, const wide_int bndrng[2] /* = NULL */) { - if ((expr && TREE_NO_WARNING (expr)) || TREE_NO_WARNING (arg)) + const opt_code opt = OPT_Wstringop_overread; + if ((expr && warning_suppressed_p (expr, opt)) + || warning_suppressed_p (arg, opt)) return; loc = expansion_point_location_if_in_system_header (loc); @@ -1123,14 +1125,14 @@ warn_string_no_nul (location_t loc, tree expr, const char *fname, if (bndrng) { if (wi::ltu_p (maxsiz, bndrng[0])) - warned = warning_at (loc, OPT_Wstringop_overread, + warned = warning_at (loc, opt, "%K%qD specified bound %s exceeds " "maximum object size %E", expr, func, bndstr, maxobjsize); else { bool maybe = wi::to_wide (size) == bndrng[0]; - warned = warning_at (loc, OPT_Wstringop_overread, + warned = warning_at (loc, opt, exact ? G_("%K%qD specified bound %s exceeds " "the size %E of unterminated array") @@ -1145,7 +1147,7 @@ warn_string_no_nul (location_t loc, tree expr, const char *fname, } } else - warned = warning_at (loc, OPT_Wstringop_overread, + warned = warning_at (loc, opt, "%K%qD argument missing terminating nul", expr, func); } @@ -1154,14 +1156,14 @@ warn_string_no_nul (location_t loc, tree expr, const char *fname, if (bndrng) { if (wi::ltu_p (maxsiz, bndrng[0])) - warned = warning_at (loc, OPT_Wstringop_overread, + warned = warning_at (loc, opt, "%qs specified bound %s exceeds " "maximum object size %E", fname, bndstr, maxobjsize); else { bool maybe = wi::to_wide (size) == bndrng[0]; - warned = warning_at (loc, OPT_Wstringop_overread, + warned = warning_at (loc, opt, exact ? 
G_("%qs specified bound %s exceeds " "the size %E of unterminated array") @@ -1176,7 +1178,7 @@ warn_string_no_nul (location_t loc, tree expr, const char *fname, } } else - warned = warning_at (loc, OPT_Wstringop_overread, + warned = warning_at (loc, opt, "%qs argument missing terminating nul", fname); } @@ -1185,9 +1187,9 @@ warn_string_no_nul (location_t loc, tree expr, const char *fname, { inform (DECL_SOURCE_LOCATION (decl), "referenced argument declared here"); - TREE_NO_WARNING (arg) = 1; + suppress_warning (arg, opt); if (expr) - TREE_NO_WARNING (expr) = 1; + suppress_warning (expr, opt); } } @@ -1445,14 +1447,14 @@ c_strlen (tree arg, int only_value, c_strlen_data *data, unsigned eltsize) { /* Suppress multiple warnings for propagated constant strings. */ if (only_value != 2 - && !TREE_NO_WARNING (arg) + && !warning_suppressed_p (arg, OPT_Warray_bounds) && warning_at (loc, OPT_Warray_bounds, "offset %qwi outside bounds of constant string", eltoff)) { if (decl) inform (DECL_SOURCE_LOCATION (decl), "%qE declared here", decl); - TREE_NO_WARNING (arg) = 1; + suppress_warning (arg, OPT_Warray_bounds); } return NULL_TREE; } @@ -3947,10 +3949,10 @@ determine_block_size (tree len, rtx len_rtx, accessing an object with SIZE. */ static bool -maybe_warn_for_bound (int opt, location_t loc, tree exp, tree func, +maybe_warn_for_bound (opt_code opt, location_t loc, tree exp, tree func, tree bndrng[2], tree size, const access_data *pad = NULL) { - if (!bndrng[0] || TREE_NO_WARNING (exp)) + if (!bndrng[0] || warning_suppressed_p (exp, opt)) return false; tree maxobjsize = max_object_size (); @@ -4042,7 +4044,7 @@ maybe_warn_for_bound (int opt, location_t loc, tree exp, tree func, inform (EXPR_LOCATION (pad->src.ref), "source object allocated here"); } - TREE_NO_WARNING (exp) = true; + suppress_warning (exp, opt); } return warned; @@ -4089,14 +4091,14 @@ maybe_warn_for_bound (int opt, location_t loc, tree exp, tree func, return false; else if (tree_int_cst_equal (bndrng[0], bndrng[1])) warned = (func - ? warning_at (loc, OPT_Wstringop_overflow_, + ? warning_at (loc, opt, (maybe ? G_("%K%qD specified bound %E may exceed " "destination size %E") : G_("%K%qD specified bound %E exceeds " "destination size %E")), exp, func, bndrng[0], size) - : warning_at (loc, OPT_Wstringop_overflow_, + : warning_at (loc, opt, (maybe ? G_("%Kspecified bound %E may exceed " "destination size %E") @@ -4105,14 +4107,14 @@ maybe_warn_for_bound (int opt, location_t loc, tree exp, tree func, exp, bndrng[0], size)); else warned = (func - ? warning_at (loc, OPT_Wstringop_overflow_, + ? warning_at (loc, opt, (maybe ? G_("%K%qD specified bound [%E, %E] may exceed " "destination size %E") : G_("%K%qD specified bound [%E, %E] exceeds " "destination size %E")), exp, func, bndrng[0], bndrng[1], size) - : warning_at (loc, OPT_Wstringop_overflow_, + : warning_at (loc, opt, (maybe ? 
G_("%Kspecified bound [%E, %E] exceeds " "destination size %E") @@ -4131,7 +4133,7 @@ maybe_warn_for_bound (int opt, location_t loc, tree exp, tree func, inform (EXPR_LOCATION (pad->dst.ref), "destination object allocated here"); } - TREE_NO_WARNING (exp) = true; + suppress_warning (exp, opt); } return warned; @@ -4357,7 +4359,7 @@ warn_for_access (location_t loc, tree func, tree exp, int opt, tree range[2], exp, range[0], range[1], size)); if (warned) - TREE_NO_WARNING (exp) = true; + suppress_warning (exp, OPT_Wstringop_overread); return warned; } @@ -4400,7 +4402,7 @@ warn_for_access (location_t loc, tree func, tree exp, int opt, tree range[2], exp, range[0], range[1], size)); if (warned) - TREE_NO_WARNING (exp) = true; + suppress_warning (exp, OPT_Wstringop_overread); return warned; } @@ -4779,8 +4781,10 @@ check_access (tree exp, tree dstwrite, && tree_fits_uhwi_p (dstwrite) && tree_int_cst_lt (dstwrite, range[0])))) { - if (TREE_NO_WARNING (exp) - || (pad && pad->dst.ref && TREE_NO_WARNING (pad->dst.ref))) + const opt_code opt = OPT_Wstringop_overflow_; + if (warning_suppressed_p (exp, opt) + || (pad && pad->dst.ref + && warning_suppressed_p (pad->dst.ref, opt))) return false; location_t loc = tree_inlined_location (exp); @@ -4791,12 +4795,12 @@ check_access (tree exp, tree dstwrite, and a source of unknown length. The call will write at least one byte past the end of the destination. */ warned = (func - ? warning_at (loc, OPT_Wstringop_overflow_, + ? warning_at (loc, opt, "%K%qD writing %E or more bytes into " "a region of size %E overflows " "the destination", exp, func, range[0], dstsize) - : warning_at (loc, OPT_Wstringop_overflow_, + : warning_at (loc, opt, "%Kwriting %E or more bytes into " "a region of size %E overflows " "the destination", @@ -4817,7 +4821,7 @@ check_access (tree exp, tree dstwrite, if (warned) { - TREE_NO_WARNING (exp) = true; + suppress_warning (exp, OPT_Wstringop_overflow_); if (pad) pad->dst.inform_access (pad->mode); } @@ -4852,9 +4856,9 @@ check_access (tree exp, tree dstwrite, if (size != maxobjsize && tree_int_cst_lt (size, range[0])) { - int opt = (dstwrite || mode != access_read_only - ? OPT_Wstringop_overflow_ - : OPT_Wstringop_overread); + opt_code opt = (dstwrite || mode != access_read_only + ? OPT_Wstringop_overflow_ + : OPT_Wstringop_overread); maybe_warn_for_bound (opt, loc, exp, func, range, size, pad); return false; } @@ -4890,19 +4894,21 @@ check_access (tree exp, tree dstwrite, if (overread) { - if (TREE_NO_WARNING (exp) - || (srcstr && TREE_NO_WARNING (srcstr)) - || (pad && pad->src.ref && TREE_NO_WARNING (pad->src.ref))) + const opt_code opt = OPT_Wstringop_overread; + if (warning_suppressed_p (exp, opt) + || (srcstr && warning_suppressed_p (srcstr, opt)) + || (pad && pad->src.ref + && warning_suppressed_p (pad->src.ref, opt))) return false; location_t loc = tree_inlined_location (exp); const bool read = mode == access_read_only || mode == access_read_write; const bool maybe = pad && pad->dst.parmarray; - if (warn_for_access (loc, func, exp, OPT_Wstringop_overread, range, - slen, false, read, maybe)) + if (warn_for_access (loc, func, exp, opt, range, slen, false, read, + maybe)) { - TREE_NO_WARNING (exp) = true; + suppress_warning (exp, opt); if (pad) pad->src.inform_access (access_read_only); } @@ -7462,8 +7468,7 @@ expand_builtin_strncmp (tree exp, ATTRIBUTE_UNUSED rtx target, /* Expand the library call ourselves using a stabilized argument list to avoid re-evaluating the function's arguments twice. 
*/ tree call = build_call_nofold_loc (loc, fndecl, 3, arg1, arg2, len); - if (TREE_NO_WARNING (exp)) - TREE_NO_WARNING (call) = true; + copy_warning (call, exp); gcc_assert (TREE_CODE (call) == CALL_EXPR); CALL_EXPR_TAILCALL (call) = CALL_EXPR_TAILCALL (exp); return expand_call (call, target, target == const0_rtx); @@ -13898,10 +13903,11 @@ maybe_emit_free_warning (tree exp) else { tree alloc_decl = gimple_call_fndecl (def_stmt); - int opt = (DECL_IS_OPERATOR_NEW_P (alloc_decl) - || DECL_IS_OPERATOR_DELETE_P (dealloc_decl) - ? OPT_Wmismatched_new_delete - : OPT_Wmismatched_dealloc); + const opt_code opt = + (DECL_IS_OPERATOR_NEW_P (alloc_decl) + || DECL_IS_OPERATOR_DELETE_P (dealloc_decl) + ? OPT_Wmismatched_new_delete + : OPT_Wmismatched_dealloc); warned = warning_at (loc, opt, "%K%qD called on pointer returned " "from a mismatched allocation " @@ -14012,7 +14018,7 @@ fold_builtin_varargs (location_t loc, tree fndecl, tree *args, int nargs) { ret = build1 (NOP_EXPR, TREE_TYPE (ret), ret); SET_EXPR_LOCATION (ret, loc); - TREE_NO_WARNING (ret) = 1; + suppress_warning (ret); return ret; } return NULL_TREE; -- cgit v1.1 From 6d3bab5d5adb3e28ddb16c97b0831efdea23cf7d Mon Sep 17 00:00:00 2001 From: Martin Sebor Date: Tue, 6 Jul 2021 13:41:02 -0600 Subject: Improve warning suppression for inlined functions. Resolves: PR middle-end/98871 - Cannot silence -Wmaybe-uninitialized at declaration site PR middle-end/98512 - #pragma GCC diagnostic ignored ineffective in conjunction with alias attribute gcc/ChangeLog: * builtins.c (warn_string_no_nul): Remove %G. (maybe_warn_for_bound): Same. (warn_for_access): Same. (check_access): Same. (check_strncat_sizes): Same. (expand_builtin_strncat): Same. (expand_builtin_strncmp): Same. (expand_builtin): Same. (expand_builtin_object_size): Same. (warn_dealloc_offset): Same. (maybe_emit_free_warning): Same. * calls.c (maybe_warn_alloc_args_overflow): Same. (maybe_warn_nonstring_arg): Same. (maybe_warn_rdwr_sizes): Same. * expr.c (expand_expr_real_1): Remove %K. * gimple-fold.c (gimple_fold_builtin_strncpy): Remove %G. (gimple_fold_builtin_strncat): Same. * gimple-ssa-sprintf.c (format_directive): Same. (handle_printf_call): Same. * gimple-ssa-warn-alloca.c (pass_walloca::execute): Same. * gimple-ssa-warn-restrict.c (maybe_diag_overlap): Same. (maybe_diag_access_bounds): Same. Call gimple_location. (check_bounds_or_overlap): Same. * trans-mem.c (ipa_tm_scan_irr_block): Remove %K. Simplify. * tree-ssa-ccp.c (pass_post_ipa_warn::execute): Remove %G. * tree-ssa-strlen.c (maybe_warn_overflow): Same. (maybe_diag_stxncpy_trunc): Same. (handle_builtin_stxncpy_strncat): Same. (maybe_warn_pointless_strcmp): Same. * tree-ssa-uninit.c (maybe_warn_operand): Same. gcc/testsuite/ChangeLog: * gcc.dg/Wobjsize-1.c: Prune expected output. * gcc.dg/Warray-bounds-71.c: New test. * gcc.dg/Warray-bounds-71.h: New test header. * gcc.dg/Warray-bounds-72.c: New test. * gcc.dg/Warray-bounds-73.c: New test. * gcc.dg/Warray-bounds-74.c: New test. * gcc.dg/Warray-bounds-75.c: New test. * gcc.dg/Wfree-nonheap-object-4.c: Adjust expected output. * gcc.dg/Wfree-nonheap-object-5.c: New test. * gcc.dg/Wfree-nonheap-object-6.c: New test. * gcc.dg/pragma-diag-10.c: New test. * gcc.dg/pragma-diag-9.c: New test. * gcc.dg/uninit-suppress_3.c: New test. * gcc.dg/pr79214.c: Xfail tests. * gcc.dg/tree-ssa/builtin-sprintf-warn-27.c: New test. * gcc.dg/format/c90-printf-1.c: Adjust expected output. 
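[Editorial note: the %K/%G directives embedded the inlining context in the diagnostic itself, which is what kept location-based suppression from applying to warnings issued for inlined calls. The following is a minimal sketch of the usage pattern this change is meant to honor, written in the spirit of the new pragma-diag tests listed above; it is an illustration under assumed flags (-O2 -Wall), not a copy of any actual test in the patch.]

/* Sketch: with the warning now issued at the statement location
   rather than through a %K call context, the pragma below can
   reliably silence the diagnostic for the inlined call.  */
char d[3];

static inline void copy (const char *s)
{
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstringop-overflow"
  __builtin_strcpy (d, s);      /* intentionally unchecked copy */
#pragma GCC diagnostic pop
}

void g (void)
{
  copy ("too long for d");      /* would otherwise trigger -Wstringop-overflow */
}
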
--- gcc/builtins.c | 342 ++++++++++++++++++++++++++++----------------------------- 1 file changed, 168 insertions(+), 174 deletions(-) (limited to 'gcc/builtins.c') diff --git a/gcc/builtins.c b/gcc/builtins.c index e5e3938..e52fe42 100644 --- a/gcc/builtins.c +++ b/gcc/builtins.c @@ -1126,30 +1126,30 @@ warn_string_no_nul (location_t loc, tree expr, const char *fname, { if (wi::ltu_p (maxsiz, bndrng[0])) warned = warning_at (loc, opt, - "%K%qD specified bound %s exceeds " + "%qD specified bound %s exceeds " "maximum object size %E", - expr, func, bndstr, maxobjsize); + func, bndstr, maxobjsize); else { bool maybe = wi::to_wide (size) == bndrng[0]; warned = warning_at (loc, opt, exact - ? G_("%K%qD specified bound %s exceeds " + ? G_("%qD specified bound %s exceeds " "the size %E of unterminated array") : (maybe - ? G_("%K%qD specified bound %s may " + ? G_("%qD specified bound %s may " "exceed the size of at most %E " "of unterminated array") - : G_("%K%qD specified bound %s exceeds " + : G_("%qD specified bound %s exceeds " "the size of at most %E " "of unterminated array")), - expr, func, bndstr, size); + func, bndstr, size); } } else warned = warning_at (loc, opt, - "%K%qD argument missing terminating nul", - expr, func); + "%qD argument missing terminating nul", + func); } else { @@ -3969,35 +3969,34 @@ maybe_warn_for_bound (opt_code opt, location_t loc, tree exp, tree func, warned = (func ? warning_at (loc, opt, (maybe - ? G_("%K%qD specified bound %E may " + ? G_("%qD specified bound %E may " "exceed maximum object size %E") - : G_("%K%qD specified bound %E " + : G_("%qD specified bound %E " "exceeds maximum object size %E")), - exp, func, bndrng[0], maxobjsize) + func, bndrng[0], maxobjsize) : warning_at (loc, opt, (maybe - ? G_("%Kspecified bound %E may " + ? G_("specified bound %E may " "exceed maximum object size %E") - : G_("%Kspecified bound %E " + : G_("specified bound %E " "exceeds maximum object size %E")), - exp, bndrng[0], maxobjsize)); + bndrng[0], maxobjsize)); else warned = (func ? warning_at (loc, opt, (maybe - ? G_("%K%qD specified bound [%E, %E] may " + ? G_("%qD specified bound [%E, %E] may " "exceed maximum object size %E") - : G_("%K%qD specified bound [%E, %E] " + : G_("%qD specified bound [%E, %E] " "exceeds maximum object size %E")), - exp, func, - bndrng[0], bndrng[1], maxobjsize) + func, bndrng[0], bndrng[1], maxobjsize) : warning_at (loc, opt, (maybe - ? G_("%Kspecified bound [%E, %E] may " + ? G_("specified bound [%E, %E] may " "exceed maximum object size %E") - : G_("%Kspecified bound [%E, %E] " + : G_("specified bound [%E, %E] " "exceeds maximum object size %E")), - exp, bndrng[0], bndrng[1], maxobjsize)); + bndrng[0], bndrng[1], maxobjsize)); } else if (!size || tree_int_cst_le (bndrng[0], size)) return false; @@ -4005,34 +4004,34 @@ maybe_warn_for_bound (opt_code opt, location_t loc, tree exp, tree func, warned = (func ? warning_at (loc, opt, (maybe - ? G_("%K%qD specified bound %E may exceed " + ? G_("%qD specified bound %E may exceed " "source size %E") - : G_("%K%qD specified bound %E exceeds " + : G_("%qD specified bound %E exceeds " "source size %E")), - exp, func, bndrng[0], size) + func, bndrng[0], size) : warning_at (loc, opt, (maybe - ? G_("%Kspecified bound %E may exceed " + ? G_("specified bound %E may exceed " "source size %E") - : G_("%Kspecified bound %E exceeds " + : G_("specified bound %E exceeds " "source size %E")), - exp, bndrng[0], size)); + bndrng[0], size)); else warned = (func ? warning_at (loc, opt, (maybe - ? 
G_("%K%qD specified bound [%E, %E] may " + ? G_("%qD specified bound [%E, %E] may " "exceed source size %E") - : G_("%K%qD specified bound [%E, %E] exceeds " + : G_("%qD specified bound [%E, %E] exceeds " "source size %E")), - exp, func, bndrng[0], bndrng[1], size) + func, bndrng[0], bndrng[1], size) : warning_at (loc, opt, (maybe - ? G_("%Kspecified bound [%E, %E] may exceed " + ? G_("specified bound [%E, %E] may exceed " "source size %E") - : G_("%Kspecified bound [%E, %E] exceeds " + : G_("specified bound [%E, %E] exceeds " "source size %E")), - exp, bndrng[0], bndrng[1], size)); + bndrng[0], bndrng[1], size)); if (warned) { if (pad && pad->src.ref) @@ -4057,35 +4056,34 @@ maybe_warn_for_bound (opt_code opt, location_t loc, tree exp, tree func, warned = (func ? warning_at (loc, opt, (maybe - ? G_("%K%qD specified size %E may " + ? G_("%qD specified size %E may " "exceed maximum object size %E") - : G_("%K%qD specified size %E " + : G_("%qD specified size %E " "exceeds maximum object size %E")), - exp, func, bndrng[0], maxobjsize) + func, bndrng[0], maxobjsize) : warning_at (loc, opt, (maybe - ? G_("%Kspecified size %E may exceed " + ? G_("specified size %E may exceed " "maximum object size %E") - : G_("%Kspecified size %E exceeds " + : G_("specified size %E exceeds " "maximum object size %E")), - exp, bndrng[0], maxobjsize)); + bndrng[0], maxobjsize)); else warned = (func ? warning_at (loc, opt, (maybe - ? G_("%K%qD specified size between %E and %E " + ? G_("%qD specified size between %E and %E " "may exceed maximum object size %E") - : G_("%K%qD specified size between %E and %E " + : G_("%qD specified size between %E and %E " "exceeds maximum object size %E")), - exp, func, - bndrng[0], bndrng[1], maxobjsize) + func, bndrng[0], bndrng[1], maxobjsize) : warning_at (loc, opt, (maybe - ? G_("%Kspecified size between %E and %E " + ? G_("specified size between %E and %E " "may exceed maximum object size %E") - : G_("%Kspecified size between %E and %E " + : G_("specified size between %E and %E " "exceeds maximum object size %E")), - exp, bndrng[0], bndrng[1], maxobjsize)); + bndrng[0], bndrng[1], maxobjsize)); } else if (!size || tree_int_cst_le (bndrng[0], size)) return false; @@ -4093,34 +4091,34 @@ maybe_warn_for_bound (opt_code opt, location_t loc, tree exp, tree func, warned = (func ? warning_at (loc, opt, (maybe - ? G_("%K%qD specified bound %E may exceed " + ? G_("%qD specified bound %E may exceed " "destination size %E") - : G_("%K%qD specified bound %E exceeds " + : G_("%qD specified bound %E exceeds " "destination size %E")), - exp, func, bndrng[0], size) + func, bndrng[0], size) : warning_at (loc, opt, (maybe - ? G_("%Kspecified bound %E may exceed " + ? G_("specified bound %E may exceed " "destination size %E") - : G_("%Kspecified bound %E exceeds " + : G_("specified bound %E exceeds " "destination size %E")), - exp, bndrng[0], size)); + bndrng[0], size)); else warned = (func ? warning_at (loc, opt, (maybe - ? G_("%K%qD specified bound [%E, %E] may exceed " + ? G_("%qD specified bound [%E, %E] may exceed " "destination size %E") - : G_("%K%qD specified bound [%E, %E] exceeds " + : G_("%qD specified bound [%E, %E] exceeds " "destination size %E")), - exp, func, bndrng[0], bndrng[1], size) + func, bndrng[0], bndrng[1], size) : warning_at (loc, opt, (maybe - ? G_("%Kspecified bound [%E, %E] exceeds " + ? 
G_("specified bound [%E, %E] exceeds " "destination size %E") - : G_("%Kspecified bound [%E, %E] exceeds " + : G_("specified bound [%E, %E] exceeds " "destination size %E")), - exp, bndrng[0], bndrng[1], size)); + bndrng[0], bndrng[1], size)); if (warned) { @@ -4158,65 +4156,63 @@ warn_for_access (location_t loc, tree func, tree exp, int opt, tree range[2], warned = (func ? warning_n (loc, opt, tree_to_uhwi (range[0]), (maybe - ? G_("%K%qD may access %E byte in a region " + ? G_("%qD may access %E byte in a region " "of size %E") - : G_("%K%qD accessing %E byte in a region " + : G_("%qD accessing %E byte in a region " "of size %E")), (maybe - ? G_ ("%K%qD may access %E bytes in a region " + ? G_ ("%qD may access %E bytes in a region " "of size %E") - : G_ ("%K%qD accessing %E bytes in a region " + : G_ ("%qD accessing %E bytes in a region " "of size %E")), - exp, func, range[0], size) + func, range[0], size) : warning_n (loc, opt, tree_to_uhwi (range[0]), (maybe - ? G_("%Kmay access %E byte in a region " + ? G_("may access %E byte in a region " "of size %E") - : G_("%Kaccessing %E byte in a region " + : G_("accessing %E byte in a region " "of size %E")), (maybe - ? G_("%Kmay access %E bytes in a region " + ? G_("may access %E bytes in a region " "of size %E") - : G_("%Kaccessing %E bytes in a region " + : G_("accessing %E bytes in a region " "of size %E")), - exp, range[0], size)); + range[0], size)); else if (tree_int_cst_sign_bit (range[1])) { /* Avoid printing the upper bound if it's invalid. */ warned = (func ? warning_at (loc, opt, (maybe - ? G_("%K%qD may access %E or more bytes " + ? G_("%qD may access %E or more bytes " "in a region of size %E") - : G_("%K%qD accessing %E or more bytes " + : G_("%qD accessing %E or more bytes " "in a region of size %E")), - exp, func, range[0], size) + func, range[0], size) : warning_at (loc, opt, (maybe - ? G_("%Kmay access %E or more bytes " + ? G_("may access %E or more bytes " "in a region of size %E") - : G_("%Kaccessing %E or more bytes " + : G_("accessing %E or more bytes " "in a region of size %E")), - exp, range[0], size)); + range[0], size)); } else warned = (func ? warning_at (loc, opt, (maybe - ? G_("%K%qD may access between %E and %E " + ? G_("%qD may access between %E and %E " "bytes in a region of size %E") - : G_("%K%qD accessing between %E and %E " + : G_("%qD accessing between %E and %E " "bytes in a region of size %E")), - exp, func, range[0], range[1], - size) + func, range[0], range[1], size) : warning_at (loc, opt, (maybe - ? G_("%Kmay access between %E and %E bytes " + ? G_("may access between %E and %E bytes " "in a region of size %E") - : G_("%Kaccessing between %E and %E bytes " + : G_("accessing between %E and %E bytes " "in a region of size %E")), - exp, range[0], range[1], - size)); + range[0], range[1], size)); return warned; } @@ -4226,69 +4222,67 @@ warn_for_access (location_t loc, tree func, tree exp, int opt, tree range[2], warned = (func ? warning_n (loc, opt, tree_to_uhwi (range[0]), (maybe - ? G_("%K%qD may write %E byte into a region " + ? G_("%qD may write %E byte into a region " "of size %E") - : G_("%K%qD writing %E byte into a region " + : G_("%qD writing %E byte into a region " "of size %E overflows the destination")), (maybe - ? G_("%K%qD may write %E bytes into a region " + ? 
G_("%qD may write %E bytes into a region " "of size %E") - : G_("%K%qD writing %E bytes into a region " + : G_("%qD writing %E bytes into a region " "of size %E overflows the destination")), - exp, func, range[0], size) + func, range[0], size) : warning_n (loc, opt, tree_to_uhwi (range[0]), (maybe - ? G_("%Kmay write %E byte into a region " + ? G_("may write %E byte into a region " "of size %E") - : G_("%Kwriting %E byte into a region " + : G_("writing %E byte into a region " "of size %E overflows the destination")), (maybe - ? G_("%Kmay write %E bytes into a region " + ? G_("may write %E bytes into a region " "of size %E") - : G_("%Kwriting %E bytes into a region " + : G_("writing %E bytes into a region " "of size %E overflows the destination")), - exp, range[0], size)); + range[0], size)); else if (tree_int_cst_sign_bit (range[1])) { /* Avoid printing the upper bound if it's invalid. */ warned = (func ? warning_at (loc, opt, (maybe - ? G_("%K%qD may write %E or more bytes " + ? G_("%qD may write %E or more bytes " "into a region of size %E") - : G_("%K%qD writing %E or more bytes " + : G_("%qD writing %E or more bytes " "into a region of size %E overflows " "the destination")), - exp, func, range[0], size) + func, range[0], size) : warning_at (loc, opt, (maybe - ? G_("%Kmay write %E or more bytes into " + ? G_("may write %E or more bytes into " "a region of size %E") - : G_("%Kwriting %E or more bytes into " + : G_("writing %E or more bytes into " "a region of size %E overflows " "the destination")), - exp, range[0], size)); + range[0], size)); } else warned = (func ? warning_at (loc, opt, (maybe - ? G_("%K%qD may write between %E and %E bytes " + ? G_("%qD may write between %E and %E bytes " "into a region of size %E") - : G_("%K%qD writing between %E and %E bytes " + : G_("%qD writing between %E and %E bytes " "into a region of size %E overflows " "the destination")), - exp, func, range[0], range[1], - size) + func, range[0], range[1], size) : warning_at (loc, opt, (maybe - ? G_("%Kmay write between %E and %E bytes " + ? G_("may write between %E and %E bytes " "into a region of size %E") - : G_("%Kwriting between %E and %E bytes " + : G_("writing between %E and %E bytes " "into a region of size %E overflows " "the destination")), - exp, range[0], range[1], - size)); + range[0], range[1], size)); return warned; } @@ -4299,64 +4293,64 @@ warn_for_access (location_t loc, tree func, tree exp, int opt, tree range[2], ? warning_n (loc, OPT_Wstringop_overread, tree_to_uhwi (range[0]), (maybe - ? G_("%K%qD may read %E byte from a region " + ? G_("%qD may read %E byte from a region " "of size %E") - : G_("%K%qD reading %E byte from a region " + : G_("%qD reading %E byte from a region " "of size %E")), (maybe - ? G_("%K%qD may read %E bytes from a region " + ? G_("%qD may read %E bytes from a region " "of size %E") - : G_("%K%qD reading %E bytes from a region " + : G_("%qD reading %E bytes from a region " "of size %E")), - exp, func, range[0], size) + func, range[0], size) : warning_n (loc, OPT_Wstringop_overread, tree_to_uhwi (range[0]), (maybe - ? G_("%Kmay read %E byte from a region " + ? G_("may read %E byte from a region " "of size %E") - : G_("%Kreading %E byte from a region " + : G_("reading %E byte from a region " "of size %E")), (maybe - ? G_("%Kmay read %E bytes from a region " + ? 
G_("may read %E bytes from a region " "of size %E") - : G_("%Kreading %E bytes from a region " + : G_("reading %E bytes from a region " "of size %E")), - exp, range[0], size)); + range[0], size)); else if (tree_int_cst_sign_bit (range[1])) { /* Avoid printing the upper bound if it's invalid. */ warned = (func ? warning_at (loc, OPT_Wstringop_overread, (maybe - ? G_("%K%qD may read %E or more bytes " + ? G_("%qD may read %E or more bytes " "from a region of size %E") - : G_("%K%qD reading %E or more bytes " + : G_("%qD reading %E or more bytes " "from a region of size %E")), - exp, func, range[0], size) + func, range[0], size) : warning_at (loc, OPT_Wstringop_overread, (maybe - ? G_("%Kmay read %E or more bytes " + ? G_("may read %E or more bytes " "from a region of size %E") - : G_("%Kreading %E or more bytes " + : G_("reading %E or more bytes " "from a region of size %E")), - exp, range[0], size)); + range[0], size)); } else warned = (func ? warning_at (loc, OPT_Wstringop_overread, (maybe - ? G_("%K%qD may read between %E and %E bytes " + ? G_("%qD may read between %E and %E bytes " "from a region of size %E") - : G_("%K%qD reading between %E and %E bytes " + : G_("%qD reading between %E and %E bytes " "from a region of size %E")), - exp, func, range[0], range[1], size) + func, range[0], range[1], size) : warning_at (loc, opt, (maybe - ? G_("%Kmay read between %E and %E bytes " + ? G_("may read between %E and %E bytes " "from a region of size %E") - : G_("%Kreading between %E and %E bytes " + : G_("reading between %E and %E bytes " "from a region of size %E")), - exp, range[0], range[1], size)); + range[0], range[1], size)); if (warned) suppress_warning (exp, OPT_Wstringop_overread); @@ -4369,37 +4363,37 @@ warn_for_access (location_t loc, tree func, tree exp, int opt, tree range[2], warned = (func ? warning_n (loc, OPT_Wstringop_overread, tree_to_uhwi (range[0]), - "%K%qD expecting %E byte in a region of size %E", - "%K%qD expecting %E bytes in a region of size %E", - exp, func, range[0], size) + "%qD expecting %E byte in a region of size %E", + "%qD expecting %E bytes in a region of size %E", + func, range[0], size) : warning_n (loc, OPT_Wstringop_overread, tree_to_uhwi (range[0]), - "%Kexpecting %E byte in a region of size %E", - "%Kexpecting %E bytes in a region of size %E", - exp, range[0], size)); + "expecting %E byte in a region of size %E", + "expecting %E bytes in a region of size %E", + range[0], size)); else if (tree_int_cst_sign_bit (range[1])) { /* Avoid printing the upper bound if it's invalid. */ warned = (func ? warning_at (loc, OPT_Wstringop_overread, - "%K%qD expecting %E or more bytes in a region " + "%qD expecting %E or more bytes in a region " "of size %E", - exp, func, range[0], size) + func, range[0], size) : warning_at (loc, OPT_Wstringop_overread, - "%Kexpecting %E or more bytes in a region " + "expecting %E or more bytes in a region " "of size %E", - exp, range[0], size)); + range[0], size)); } else warned = (func ? 
warning_at (loc, OPT_Wstringop_overread, - "%K%qD expecting between %E and %E bytes in " + "%qD expecting between %E and %E bytes in " "a region of size %E", - exp, func, range[0], range[1], size) + func, range[0], range[1], size) : warning_at (loc, OPT_Wstringop_overread, - "%Kexpecting between %E and %E bytes in " + "expecting between %E and %E bytes in " "a region of size %E", - exp, range[0], range[1], size)); + range[0], range[1], size)); if (warned) suppress_warning (exp, OPT_Wstringop_overread); @@ -4759,7 +4753,7 @@ check_access (tree exp, tree dstwrite, && TREE_CODE (range[0]) == INTEGER_CST && tree_int_cst_lt (maxobjsize, range[0])) { - location_t loc = tree_inlined_location (exp); + location_t loc = EXPR_LOCATION (exp); maybe_warn_for_bound (OPT_Wstringop_overflow_, loc, exp, func, range, NULL_TREE, pad); return false; @@ -4787,7 +4781,7 @@ check_access (tree exp, tree dstwrite, && warning_suppressed_p (pad->dst.ref, opt))) return false; - location_t loc = tree_inlined_location (exp); + location_t loc = EXPR_LOCATION (exp); bool warned = false; if (dstwrite == slen && at_least_one) { @@ -4796,15 +4790,15 @@ check_access (tree exp, tree dstwrite, at least one byte past the end of the destination. */ warned = (func ? warning_at (loc, opt, - "%K%qD writing %E or more bytes into " + "%qD writing %E or more bytes into " "a region of size %E overflows " "the destination", - exp, func, range[0], dstsize) + func, range[0], dstsize) : warning_at (loc, opt, - "%Kwriting %E or more bytes into " + "writing %E or more bytes into " "a region of size %E overflows " "the destination", - exp, range[0], dstsize)); + range[0], dstsize)); } else { @@ -4840,7 +4834,7 @@ check_access (tree exp, tree dstwrite, PAD is nonnull and BNDRNG is valid. */ get_size_range (maxread, range, pad ? 
pad->src.bndrng : NULL); - location_t loc = tree_inlined_location (exp); + location_t loc = EXPR_LOCATION (exp); tree size = dstsize; if (pad && pad->mode == access_read_only) size = wide_int_to_tree (sizetype, pad->src.sizrng[1]); @@ -4901,7 +4895,7 @@ check_access (tree exp, tree dstwrite, && warning_suppressed_p (pad->src.ref, opt))) return false; - location_t loc = tree_inlined_location (exp); + location_t loc = EXPR_LOCATION (exp); const bool read = mode == access_read_only || mode == access_read_write; const bool maybe = pad && pad->dst.parmarray; @@ -6481,10 +6475,10 @@ check_strncat_sizes (tree exp, tree objsize) if (tree_fits_uhwi_p (maxread) && tree_fits_uhwi_p (objsize) && tree_int_cst_equal (objsize, maxread)) { - location_t loc = tree_inlined_location (exp); + location_t loc = EXPR_LOCATION (exp); warning_at (loc, OPT_Wstringop_overflow_, - "%K%qD specified bound %E equals destination size", - exp, get_callee_fndecl (exp), maxread); + "%qD specified bound %E equals destination size", + get_callee_fndecl (exp), maxread); return false; } @@ -6554,10 +6548,10 @@ expand_builtin_strncat (tree exp, rtx) if (tree_fits_uhwi_p (maxread) && tree_fits_uhwi_p (destsize) && tree_int_cst_equal (destsize, maxread)) { - location_t loc = tree_inlined_location (exp); + location_t loc = EXPR_LOCATION (exp); warning_at (loc, OPT_Wstringop_overflow_, - "%K%qD specified bound %E equals destination size", - exp, get_callee_fndecl (exp), maxread); + "%qD specified bound %E equals destination size", + get_callee_fndecl (exp), maxread); return NULL_RTX; } @@ -7330,7 +7324,7 @@ expand_builtin_strncmp (tree exp, ATTRIBUTE_UNUSED rtx target, || !check_nul_terminated_array (exp, arg2, arg3)) return NULL_RTX; - location_t loc = tree_inlined_location (exp); + location_t loc = EXPR_LOCATION (exp); tree len1 = c_strlen (arg1, 1); tree len2 = c_strlen (arg2, 1); @@ -10006,13 +10000,13 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode, case BUILT_IN_VA_ARG_PACK: /* All valid uses of __builtin_va_arg_pack () are removed during inlining. */ - error ("%Kinvalid use of %<__builtin_va_arg_pack ()%>", exp); + error ("invalid use of %<__builtin_va_arg_pack ()%>"); return const0_rtx; case BUILT_IN_VA_ARG_PACK_LEN: /* All valid uses of __builtin_va_arg_pack_len () are removed during inlining. */ - error ("%Kinvalid use of %<__builtin_va_arg_pack_len ()%>", exp); + error ("invalid use of %<__builtin_va_arg_pack_len ()%>"); return const0_rtx; /* Return the address of the first anonymous stack arg. 
*/ @@ -12961,8 +12955,8 @@ expand_builtin_object_size (tree exp) if (!validate_arglist (exp, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) { - error ("%Kfirst argument of %qD must be a pointer, second integer constant", - exp, fndecl); + error ("first argument of %qD must be a pointer, second integer constant", + fndecl); expand_builtin_trap (); return const0_rtx; } @@ -12974,8 +12968,8 @@ expand_builtin_object_size (tree exp) || tree_int_cst_sgn (ost) < 0 || compare_tree_int (ost, 3) > 0) { - error ("%Klast argument of %qD is not integer constant between 0 and 3", - exp, fndecl); + error ("last argument of %qD is not integer constant between 0 and 3", + fndecl); expand_builtin_trap (); return const0_rtx; } @@ -13787,8 +13781,8 @@ warn_dealloc_offset (location_t loc, tree exp, const access_ref &aref) } if (!warning_at (loc, OPT_Wfree_nonheap_object, - "%K%qD called on pointer %qE with nonzero offset%s", - exp, dealloc_decl, aref.ref, offstr)) + "%qD called on pointer %qE with nonzero offset%s", + dealloc_decl, aref.ref, offstr)) return false; if (DECL_P (aref.ref)) @@ -13843,15 +13837,15 @@ maybe_emit_free_warning (tree exp) return; tree dealloc_decl = get_callee_fndecl (exp); - location_t loc = tree_inlined_location (exp); + location_t loc = EXPR_LOCATION (exp); if (DECL_P (ref) || EXPR_P (ref)) { /* Diagnose freeing a declared object. */ if (aref.ref_declared () && warning_at (loc, OPT_Wfree_nonheap_object, - "%K%qD called on unallocated object %qD", - exp, dealloc_decl, ref)) + "%qD called on unallocated object %qD", + dealloc_decl, ref)) { loc = (DECL_P (ref) ? DECL_SOURCE_LOCATION (ref) @@ -13870,8 +13864,8 @@ maybe_emit_free_warning (tree exp) else if (CONSTANT_CLASS_P (ref)) { if (warning_at (loc, OPT_Wfree_nonheap_object, - "%K%qD called on a pointer to an unallocated " - "object %qE", exp, dealloc_decl, ref)) + "%qD called on a pointer to an unallocated " + "object %qE", dealloc_decl, ref)) { if (TREE_CODE (ptr) == SSA_NAME) { @@ -13909,18 +13903,18 @@ maybe_emit_free_warning (tree exp) ? OPT_Wmismatched_new_delete : OPT_Wmismatched_dealloc); warned = warning_at (loc, opt, - "%K%qD called on pointer returned " + "%qD called on pointer returned " "from a mismatched allocation " - "function", exp, dealloc_decl); + "function", dealloc_decl); } } else if (gimple_call_builtin_p (def_stmt, BUILT_IN_ALLOCA) || gimple_call_builtin_p (def_stmt, BUILT_IN_ALLOCA_WITH_ALIGN)) warned = warning_at (loc, OPT_Wfree_nonheap_object, - "%K%qD called on pointer to " + "%qD called on pointer to " "an unallocated object", - exp, dealloc_decl); + dealloc_decl); else if (warn_dealloc_offset (loc, exp, aref)) return; -- cgit v1.1 From a110855667782dac7b674d3e328b253b3b3c919b Mon Sep 17 00:00:00 2001 From: Martin Sebor Date: Wed, 7 Jul 2021 14:05:25 -0600 Subject: Correct handling of variable offset minus constant in -Warray-bounds [PR100137] Resolves: PR tree-optimization/100137 - -Warray-bounds false positive on varying offset plus negative PR tree-optimization/99121 - ICE in -Warray-bounds on a multidimensional PR tree-optimization/97027 - missing warning on buffer overflow storing a larger scalar into a smaller array gcc/ChangeLog: PR tree-optimization/100137 PR tree-optimization/99121 PR tree-optimization/97027 * builtins.c (access_ref::access_ref): Also set offmax. (access_ref::offset_in_range): Define new function. (access_ref::add_offset): Set offmax. (access_ref::inform_access): Handle access_none. (handle_mem_ref): Clear ostype. (compute_objsize_r): Handle ASSERT_EXPR. 
* builtins.h (struct access_ref): Add offmax member. * gimple-array-bounds.cc (array_bounds_checker::check_mem_ref): Use compute_objsize() and simplify. gcc/testsuite/ChangeLog: PR tree-optimization/100137 PR tree-optimization/99121 PR tree-optimization/97027 * c-c++-common/Warray-bounds-3.c: Remove xfail * c-c++-common/Warray-bounds-4.c: Add an expected warning. * c-c++-common/Warray-bounds-9.c: New test. * c-c++-common/Warray-bounds-10.c: New test. * g++.dg/asan/asan_test.C: Suppress expected warnings. * g++.dg/pr95768.C: Same. * g++.dg/warn/Warray-bounds-10.C: Adjust text of expected messages. * g++.dg/warn/Warray-bounds-11.C: Same. * g++.dg/warn/Warray-bounds-12.C: Same. * g++.dg/warn/Warray-bounds-13.C: Same. * g++.dg/warn/Warray-bounds-17.C: Same. * g++.dg/warn/Warray-bounds-20.C: Same. * gcc.dg/Warray-bounds-29.c: Same. * gcc.dg/Warray-bounds-30.c: Add xfail. * gcc.dg/Warray-bounds-31.c: Adjust text of expected messages. * gcc.dg/Warray-bounds-32.c: Same. * gcc.dg/Warray-bounds-52.c: Same. * gcc.dg/Warray-bounds-53.c: Same. * gcc.dg/Warray-bounds-58.c: Remove xfail. * gcc.dg/Warray-bounds-63.c: Adjust text of expected messages. * gcc.dg/Warray-bounds-66.c: Same. * gcc.dg/Warray-bounds-69.c: Same. * gcc.dg/Wstringop-overflow-34.c: Same. * gcc.dg/Wstringop-overflow-47.c: Same. * gcc.dg/Wstringop-overflow-61.c: Same. * gcc.dg/Warray-bounds-77.c: New test. * gcc.dg/Warray-bounds-78.c: New test. * gcc.dg/Warray-bounds-79.c: New test. --- gcc/builtins.c | 69 +++++++++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 61 insertions(+), 8 deletions(-) (limited to 'gcc/builtins.c') diff --git a/gcc/builtins.c b/gcc/builtins.c index e52fe42..39ab139 100644 --- a/gcc/builtins.c +++ b/gcc/builtins.c @@ -206,6 +206,7 @@ access_ref::access_ref (tree bound /* = NULL_TREE */, { /* Set to valid. */ offrng[0] = offrng[1] = 0; + offmax[0] = offmax[1] = 0; /* Invalidate. */ sizrng[0] = sizrng[1] = -1; @@ -457,6 +458,21 @@ access_ref::size_remaining (offset_int *pmin /* = NULL */) const return sizrng[1] - or0; } +/* Return true if the offset and object size are in range for SIZE. */ + +bool +access_ref::offset_in_range (const offset_int &size) const +{ + if (size_remaining () < size) + return false; + + if (base0) + return offmax[0] >= 0 && offmax[1] <= sizrng[1]; + + offset_int maxoff = wi::to_offset (TYPE_MAX_VALUE (ptrdiff_type_node)); + return offmax[0] > -maxoff && offmax[1] < maxoff; +} + /* Add the range [MIN, MAX] to the offset range. For known objects (with zero-based offsets) at least one of whose offset's bounds is in range, constrain the other (or both) to the bounds of the object (i.e., zero @@ -493,6 +509,8 @@ void access_ref::add_offset (const offset_int &min, const offset_int &max) if (max >= 0) { offrng[0] = 0; + if (offmax[0] > 0) + offmax[0] = 0; return; } @@ -509,6 +527,12 @@ void access_ref::add_offset (const offset_int &min, const offset_int &max) offrng[0] = 0; } + /* Set the minimum and maximmum computed so far. 
*/ + if (offrng[1] < 0 && offrng[1] < offmax[0]) + offmax[0] = offrng[1]; + if (offrng[0] > 0 && offrng[0] > offmax[1]) + offmax[1] = offrng[0]; + if (!base0) return; @@ -4571,23 +4595,46 @@ access_ref::inform_access (access_mode mode) const return; } + if (mode == access_read_only) + { + if (allocfn == NULL_TREE) + { + if (*offstr) + inform (loc, "at offset %s into source object %qE of size %s", + offstr, ref, sizestr); + else + inform (loc, "source object %qE of size %s", ref, sizestr); + + return; + } + + if (*offstr) + inform (loc, + "at offset %s into source object of size %s allocated by %qE", + offstr, sizestr, allocfn); + else + inform (loc, "source object of size %s allocated by %qE", + sizestr, allocfn); + return; + } + if (allocfn == NULL_TREE) { if (*offstr) - inform (loc, "at offset %s into source object %qE of size %s", + inform (loc, "at offset %s into object %qE of size %s", offstr, ref, sizestr); else - inform (loc, "source object %qE of size %s", ref, sizestr); + inform (loc, "object %qE of size %s", ref, sizestr); return; } if (*offstr) inform (loc, - "at offset %s into source object of size %s allocated by %qE", + "at offset %s into object of size %s allocated by %qE", offstr, sizestr, allocfn); else - inform (loc, "source object of size %s allocated by %qE", + inform (loc, "object of size %s allocated by %qE", sizestr, allocfn); } @@ -5433,16 +5480,16 @@ handle_mem_ref (tree mref, int ostype, access_ref *pref, if (VECTOR_TYPE_P (TREE_TYPE (mref))) { - /* Hack: Give up for MEM_REFs of vector types; those may be - synthesized from multiple assignments to consecutive data - members (see PR 93200 and 96963). + /* Hack: Handle MEM_REFs of vector types as those to complete + objects; those may be synthesized from multiple assignments + to consecutive data members (see PR 93200 and 96963). FIXME: Vectorized assignments should only be present after vectorization so this hack is only necessary after it has run and could be avoided in calls from prior passes (e.g., tree-ssa-strlen.c). FIXME: Deal with this more generally, e.g., by marking up such MEM_REFs at the time they're created. */ - return false; + ostype = 0; } tree mrefop = TREE_OPERAND (mref, 0); @@ -5796,6 +5843,12 @@ compute_objsize_r (tree ptr, int ostype, access_ref *pref, tree rhs = gimple_assign_rhs1 (stmt); + if (code == ASSERT_EXPR) + { + rhs = TREE_OPERAND (rhs, 0); + return compute_objsize_r (rhs, ostype, pref, snlim, qry); + } + if (code == POINTER_PLUS_EXPR && TREE_CODE (TREE_TYPE (rhs)) == POINTER_TYPE) { -- cgit v1.1 From 8bf5b49ebd2176b8c535147377381dd07fbdd643 Mon Sep 17 00:00:00 2001 From: Martin Sebor Date: Tue, 20 Jul 2021 13:48:20 -0600 Subject: Correct stpcpy offset computation for -Warray-bounds et al. [PR101397]. Resolves: PR middle-end/101397 - spurious warning writing to the result of stpcpy minus 1 gcc/ChangeLog: PR middle-end/101397 * builtins.c (gimple_call_return_array): Add argument. Correct offsets for memchr, mempcpy, stpcpy, and stpncpy. (compute_objsize_r): Adjust offset computation for argument returning built-ins. gcc/testsuite/ChangeLog: PR middle-end/101397 * gcc.dg/Warray-bounds-80.c: New test. * gcc.dg/Warray-bounds-81.c: New test. * gcc.dg/Warray-bounds-82.c: New test. * gcc.dg/Warray-bounds-83.c: New test. * gcc.dg/Warray-bounds-84.c: New test. * gcc.dg/Wstringop-overflow-46.c: Adjust expected output. 
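[Editorial note: stpcpy returns a pointer to the terminating nul of the copied string, so the returned pointer minus a small constant still points into the destination; the spurious warning in PR 101397 came from not accounting for that when computing the returned pointer's offset range. A hedged sketch of the pattern follows (illustrative only, not the actual gcc.dg/Warray-bounds-80.c test).]

/* Sketch, assuming -O2 -Wall: stpcpy (a, "abc") stores 'a','b','c','\0'
   into a[0..3] and returns a + 3, a pointer to the nul.  p[-1] therefore
   refers to a[2], so the store is in bounds and, after this change,
   is no longer flagged by -Warray-bounds and friends.  */
char a[8];

void f (void)
{
  char *p = __builtin_stpcpy (a, "abc");  /* p == a + 3 */
  p[-1] = 'X';                            /* overwrites 'c': valid */
}
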
--- gcc/builtins.c | 103 +++++++++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 86 insertions(+), 17 deletions(-) (limited to 'gcc/builtins.c') diff --git a/gcc/builtins.c b/gcc/builtins.c index 39ab139..170d776 100644 --- a/gcc/builtins.c +++ b/gcc/builtins.c @@ -5200,12 +5200,19 @@ get_offset_range (tree x, gimple *stmt, offset_int r[2], range_query *rvals) /* Return the argument that the call STMT to a built-in function returns or null if it doesn't. On success, set OFFRNG[] to the range of offsets from the argument reflected in the value returned by the built-in if it - can be determined, otherwise to 0 and HWI_M1U respectively. */ + can be determined, otherwise to 0 and HWI_M1U respectively. Set + *PAST_END for functions like mempcpy that might return a past the end + pointer (most functions return a dereferenceable pointer to an existing + element of an array). */ static tree -gimple_call_return_array (gimple *stmt, offset_int offrng[2], +gimple_call_return_array (gimple *stmt, offset_int offrng[2], bool *past_end, range_query *rvals) { + /* Clear and set below for the rare function(s) that might return + a past-the-end pointer. */ + *past_end = false; + { /* Check for attribute fn spec to see if the function returns one of its arguments. */ @@ -5213,6 +5220,7 @@ gimple_call_return_array (gimple *stmt, offset_int offrng[2], unsigned int argno; if (fnspec.returns_arg (&argno)) { + /* Functions return the first argument (not a range). */ offrng[0] = offrng[1] = 0; return gimple_call_arg (stmt, argno); } @@ -5242,6 +5250,7 @@ gimple_call_return_array (gimple *stmt, offset_int offrng[2], if (gimple_call_num_args (stmt) != 2) return NULL_TREE; + /* Allocation functions return a pointer to the beginning. */ offrng[0] = offrng[1] = 0; return gimple_call_arg (stmt, 1); } @@ -5253,10 +5262,6 @@ gimple_call_return_array (gimple *stmt, offset_int offrng[2], case BUILT_IN_MEMMOVE: case BUILT_IN_MEMMOVE_CHK: case BUILT_IN_MEMSET: - case BUILT_IN_STPCPY: - case BUILT_IN_STPCPY_CHK: - case BUILT_IN_STPNCPY: - case BUILT_IN_STPNCPY_CHK: case BUILT_IN_STRCAT: case BUILT_IN_STRCAT_CHK: case BUILT_IN_STRCPY: @@ -5265,18 +5270,34 @@ gimple_call_return_array (gimple *stmt, offset_int offrng[2], case BUILT_IN_STRNCAT_CHK: case BUILT_IN_STRNCPY: case BUILT_IN_STRNCPY_CHK: + /* Functions return the first argument (not a range). */ offrng[0] = offrng[1] = 0; return gimple_call_arg (stmt, 0); case BUILT_IN_MEMPCPY: case BUILT_IN_MEMPCPY_CHK: { + /* The returned pointer is in a range constrained by the smaller + of the upper bound of the size argument and the source object + size. */ + offrng[0] = 0; + offrng[1] = HOST_WIDE_INT_M1U; tree off = gimple_call_arg (stmt, 2); - if (!get_offset_range (off, stmt, offrng, rvals)) + bool off_valid = get_offset_range (off, stmt, offrng, rvals); + if (!off_valid || offrng[0] != offrng[1]) { - offrng[0] = 0; - offrng[1] = HOST_WIDE_INT_M1U; + /* If the offset is either indeterminate or in some range, + try to constrain its upper bound to at most the size + of the source object. */ + access_ref aref; + tree src = gimple_call_arg (stmt, 1); + if (compute_objsize (src, 1, &aref, rvals) + && aref.sizrng[1] < offrng[1]) + offrng[1] = aref.sizrng[1]; } + + /* Mempcpy may return a past-the-end pointer. 
*/ + *past_end = true; return gimple_call_arg (stmt, 0); } @@ -5284,23 +5305,63 @@ gimple_call_return_array (gimple *stmt, offset_int offrng[2], { tree off = gimple_call_arg (stmt, 2); if (get_offset_range (off, stmt, offrng, rvals)) - offrng[0] = 0; + offrng[1] -= 1; else - { - offrng[0] = 0; - offrng[1] = HOST_WIDE_INT_M1U; - } + offrng[1] = HOST_WIDE_INT_M1U; + + offrng[0] = 0; return gimple_call_arg (stmt, 0); } case BUILT_IN_STRCHR: case BUILT_IN_STRRCHR: case BUILT_IN_STRSTR: + offrng[0] = 0; + offrng[1] = HOST_WIDE_INT_M1U; + return gimple_call_arg (stmt, 0); + + case BUILT_IN_STPCPY: + case BUILT_IN_STPCPY_CHK: { + access_ref aref; + tree src = gimple_call_arg (stmt, 1); + if (compute_objsize (src, 1, &aref, rvals)) + offrng[1] = aref.sizrng[1] - 1; + else + offrng[1] = HOST_WIDE_INT_M1U; + offrng[0] = 0; + return gimple_call_arg (stmt, 0); + } + + case BUILT_IN_STPNCPY: + case BUILT_IN_STPNCPY_CHK: + { + /* The returned pointer is in a range between the first argument + and it plus the smaller of the upper bound of the size argument + and the source object size. */ offrng[1] = HOST_WIDE_INT_M1U; + tree off = gimple_call_arg (stmt, 2); + if (!get_offset_range (off, stmt, offrng, rvals) + || offrng[0] != offrng[1]) + { + /* If the offset is either indeterminate or in some range, + try to constrain its upper bound to at most the size + of the source object. */ + access_ref aref; + tree src = gimple_call_arg (stmt, 1); + if (compute_objsize (src, 1, &aref, rvals) + && aref.sizrng[1] < offrng[1]) + offrng[1] = aref.sizrng[1]; + } + + /* When the source is the empty string the returned pointer is + a copy of the argument. Otherwise stpcpy can also return + a past-the-end pointer. */ + offrng[0] = 0; + *past_end = true; + return gimple_call_arg (stmt, 0); } - return gimple_call_arg (stmt, 0); default: break; @@ -5753,9 +5814,12 @@ compute_objsize_r (tree ptr, int ostype, access_ref *pref, /* For functions known to return one of their pointer arguments try to determine what the returned pointer points to, and on success add OFFRNG which was set to the offset added by - the function (e.g., memchr) to the overall offset. */ + the function (e.g., memchr or stpcpy) to the overall offset. + */ + bool past_end; offset_int offrng[2]; - if (tree ret = gimple_call_return_array (stmt, offrng, rvals)) + if (tree ret = gimple_call_return_array (stmt, offrng, + &past_end, rvals)) { if (!compute_objsize_r (ret, ostype, pref, snlim, qry)) return false; @@ -5764,6 +5828,11 @@ compute_objsize_r (tree ptr, int ostype, access_ref *pref, the object. */ offset_int remrng[2]; remrng[1] = pref->size_remaining (remrng); + if (remrng[1] != 0 && !past_end) + /* Decrement the size for functions that never return + a past-the-end pointer. */ + remrng[1] -= 1; + if (remrng[1] < offrng[1]) offrng[1] = remrng[1]; pref->add_offset (offrng[0], offrng[1]); -- cgit v1.1 From 2a837de28ee94b4ec201059a9a7aaa852e6808da Mon Sep 17 00:00:00 2001 From: Martin Sebor Date: Wed, 28 Jul 2021 15:28:10 -0600 Subject: Add new gimple-ssa-warn-access pass. gcc/ChangeLog: * Makefile.in (OBJS): Add gimple-ssa-warn-access.o and pointer-query.o. * attribs.h (fndecl_dealloc_argno): Move fndecl_dealloc_argno to tree.h. * builtins.c (compute_objsize_r): Move to pointer-query.cc. (access_ref::access_ref): Same. (access_ref::phi): Same. (access_ref::get_ref): Same. (access_ref::size_remaining): Same. (access_ref::offset_in_range): Same. (access_ref::add_offset): Same. (access_ref::inform_access): Same. (ssa_name_limit_t::visit_phi): Same. 
(ssa_name_limit_t::leave_phi): Same. (ssa_name_limit_t::next): Same. (ssa_name_limit_t::next_phi): Same. (ssa_name_limit_t::~ssa_name_limit_t): Same. (pointer_query::pointer_query): Same. (pointer_query::get_ref): Same. (pointer_query::put_ref): Same. (pointer_query::flush_cache): Same. (warn_string_no_nul): Move to gimple-ssa-warn-access.cc. (check_nul_terminated_array): Same. (unterminated_array): Same. (maybe_warn_for_bound): Same. (check_read_access): Same. (warn_for_access): Same. (get_size_range): Same. (check_access): Same. (gimple_call_alloc_size): Move to tree.c. (gimple_parm_array_size): Move to pointer-query.cc. (get_offset_range): Same. (gimple_call_return_array): Same. (handle_min_max_size): Same. (handle_array_ref): Same. (handle_mem_ref): Same. (compute_objsize): Same. (gimple_call_alloc_p): Move to gimple-ssa-warn-access.cc. (call_dealloc_argno): Same. (fndecl_dealloc_argno): Same. (new_delete_mismatch_p): Same. (matching_alloc_calls_p): Same. (warn_dealloc_offset): Same. (maybe_emit_free_warning): Same. * builtins.h (check_nul_terminated_array): Move to gimple-ssa-warn-access.h. (check_nul_terminated_array): Same. (warn_string_no_nul): Same. (unterminated_array): Same. (class ssa_name_limit_t): Same. (class pointer_query): Same. (struct access_ref): Same. (class range_query): Same. (struct access_data): Same. (gimple_call_alloc_size): Same. (gimple_parm_array_size): Same. (compute_objsize): Same. (class access_data): Same. (maybe_emit_free_warning): Same. * calls.c (initialize_argument_information): Remove call to maybe_emit_free_warning. * gimple-array-bounds.cc: Include new header.. * gimple-fold.c: Same. * gimple-ssa-sprintf.c: Same. * gimple-ssa-warn-restrict.c: Same. * passes.def: Add pass_warn_access. * tree-pass.h (make_pass_warn_access): Declare. * tree-ssa-strlen.c: Include new headers. * tree.c (fndecl_dealloc_argno): Move here from builtins.c. * tree.h (fndecl_dealloc_argno): Move here from attribs.h. * gimple-ssa-warn-access.cc: New file. * gimple-ssa-warn-access.h: New file. * pointer-query.cc: New file. * pointer-query.h: New file. gcc/cp/ChangeLog: * init.c: Include new header. --- gcc/builtins.c | 15209 +++++++++++++++++++++---------------------------------- 1 file changed, 5847 insertions(+), 9362 deletions(-) (limited to 'gcc/builtins.c') diff --git a/gcc/builtins.c b/gcc/builtins.c index 170d776..845a8bb 100644 --- a/gcc/builtins.c +++ b/gcc/builtins.c @@ -80,6 +80,8 @@ along with GCC; see the file COPYING3. If not see #include "attr-fnspec.h" #include "demangle.h" #include "gimple-range.h" +#include "pointer-query.h" +#include "gimple-ssa-warn-access.h" struct target_builtins default_target_builtins; #if SWITCHABLE_TARGET @@ -185,8 +187,6 @@ static void maybe_emit_chk_warning (tree, enum built_in_function); static void maybe_emit_sprintf_chk_warning (tree, enum built_in_function); static tree fold_builtin_object_size (tree, tree); static bool check_read_access (tree, tree, tree = NULL_TREE, int = 1); -static bool compute_objsize_r (tree, int, access_ref *, ssa_name_limit_t &, - pointer_query *); unsigned HOST_WIDE_INT target_newline; unsigned HOST_WIDE_INT target_percent; @@ -199,565 +199,6 @@ static tree do_mpfr_remquo (tree, tree, tree); static tree do_mpfr_lgamma_r (tree, tree, tree); static void expand_builtin_sync_synchronize (void); -access_ref::access_ref (tree bound /* = NULL_TREE */, - bool minaccess /* = false */) -: ref (), eval ([](tree x){ return x; }), deref (), trail1special (true), - base0 (true), parmarray () -{ - /* Set to valid. 
*/ - offrng[0] = offrng[1] = 0; - offmax[0] = offmax[1] = 0; - /* Invalidate. */ - sizrng[0] = sizrng[1] = -1; - - /* Set the default bounds of the access and adjust below. */ - bndrng[0] = minaccess ? 1 : 0; - bndrng[1] = HOST_WIDE_INT_M1U; - - /* When BOUND is nonnull and a range can be extracted from it, - set the bounds of the access to reflect both it and MINACCESS. - BNDRNG[0] is the size of the minimum access. */ - tree rng[2]; - if (bound && get_size_range (bound, rng, SR_ALLOW_ZERO)) - { - bndrng[0] = wi::to_offset (rng[0]); - bndrng[1] = wi::to_offset (rng[1]); - bndrng[0] = bndrng[0] > 0 && minaccess ? 1 : 0; - } -} - -/* Return the PHI node REF refers to or null if it doesn't. */ - -gphi * -access_ref::phi () const -{ - if (!ref || TREE_CODE (ref) != SSA_NAME) - return NULL; - - gimple *def_stmt = SSA_NAME_DEF_STMT (ref); - if (gimple_code (def_stmt) != GIMPLE_PHI) - return NULL; - - return as_a (def_stmt); -} - -/* Determine and return the largest object to which *THIS. If *THIS - refers to a PHI and PREF is nonnull, fill *PREF with the details - of the object determined by compute_objsize(ARG, OSTYPE) for each - PHI argument ARG. */ - -tree -access_ref::get_ref (vec *all_refs, - access_ref *pref /* = NULL */, - int ostype /* = 1 */, - ssa_name_limit_t *psnlim /* = NULL */, - pointer_query *qry /* = NULL */) const -{ - gphi *phi_stmt = this->phi (); - if (!phi_stmt) - return ref; - - /* FIXME: Calling get_ref() with a null PSNLIM is dangerous and might - cause unbounded recursion. */ - ssa_name_limit_t snlim_buf; - if (!psnlim) - psnlim = &snlim_buf; - - if (!psnlim->visit_phi (ref)) - return NULL_TREE; - - /* Reflects the range of offsets of all PHI arguments refer to the same - object (i.e., have the same REF). */ - access_ref same_ref; - /* The conservative result of the PHI reflecting the offset and size - of the largest PHI argument, regardless of whether or not they all - refer to the same object. */ - pointer_query empty_qry; - if (!qry) - qry = &empty_qry; - - access_ref phi_ref; - if (pref) - { - phi_ref = *pref; - same_ref = *pref; - } - - /* Set if any argument is a function array (or VLA) parameter not - declared [static]. */ - bool parmarray = false; - /* The size of the smallest object referenced by the PHI arguments. */ - offset_int minsize = 0; - const offset_int maxobjsize = wi::to_offset (max_object_size ()); - /* The offset of the PHI, not reflecting those of its arguments. */ - const offset_int orng[2] = { phi_ref.offrng[0], phi_ref.offrng[1] }; - - const unsigned nargs = gimple_phi_num_args (phi_stmt); - for (unsigned i = 0; i < nargs; ++i) - { - access_ref phi_arg_ref; - tree arg = gimple_phi_arg_def (phi_stmt, i); - if (!compute_objsize_r (arg, ostype, &phi_arg_ref, *psnlim, qry) - || phi_arg_ref.sizrng[0] < 0) - /* A PHI with all null pointer arguments. */ - return NULL_TREE; - - /* Add PREF's offset to that of the argument. 
*/ - phi_arg_ref.add_offset (orng[0], orng[1]); - if (TREE_CODE (arg) == SSA_NAME) - qry->put_ref (arg, phi_arg_ref); - - if (all_refs) - all_refs->safe_push (phi_arg_ref); - - const bool arg_known_size = (phi_arg_ref.sizrng[0] != 0 - || phi_arg_ref.sizrng[1] != maxobjsize); - - parmarray |= phi_arg_ref.parmarray; - - const bool nullp = integer_zerop (arg) && (i || i + 1 < nargs); - - if (phi_ref.sizrng[0] < 0) - { - if (!nullp) - same_ref = phi_arg_ref; - phi_ref = phi_arg_ref; - if (arg_known_size) - minsize = phi_arg_ref.sizrng[0]; - continue; - } - - const bool phi_known_size = (phi_ref.sizrng[0] != 0 - || phi_ref.sizrng[1] != maxobjsize); - - if (phi_known_size && phi_arg_ref.sizrng[0] < minsize) - minsize = phi_arg_ref.sizrng[0]; - - /* Disregard null pointers in PHIs with two or more arguments. - TODO: Handle this better! */ - if (nullp) - continue; - - /* Determine the amount of remaining space in the argument. */ - offset_int argrem[2]; - argrem[1] = phi_arg_ref.size_remaining (argrem); - - /* Determine the amount of remaining space computed so far and - if the remaining space in the argument is more use it instead. */ - offset_int phirem[2]; - phirem[1] = phi_ref.size_remaining (phirem); - - if (phi_arg_ref.ref != same_ref.ref) - same_ref.ref = NULL_TREE; - - if (phirem[1] < argrem[1] - || (phirem[1] == argrem[1] - && phi_ref.sizrng[1] < phi_arg_ref.sizrng[1])) - /* Use the argument with the most space remaining as the result, - or the larger one if the space is equal. */ - phi_ref = phi_arg_ref; - - /* Set SAME_REF.OFFRNG to the maximum range of all arguments. */ - if (phi_arg_ref.offrng[0] < same_ref.offrng[0]) - same_ref.offrng[0] = phi_arg_ref.offrng[0]; - if (same_ref.offrng[1] < phi_arg_ref.offrng[1]) - same_ref.offrng[1] = phi_arg_ref.offrng[1]; - } - - if (!same_ref.ref && same_ref.offrng[0] != 0) - /* Clear BASE0 if not all the arguments refer to the same object and - if not all their offsets are zero-based. This allows the final - PHI offset to out of bounds for some arguments but not for others - (or negative even of all the arguments are BASE0), which is overly - permissive. */ - phi_ref.base0 = false; - - if (same_ref.ref) - phi_ref = same_ref; - else - { - /* Replace the lower bound of the largest argument with the size - of the smallest argument, and set PARMARRAY if any argument - was one. */ - phi_ref.sizrng[0] = minsize; - phi_ref.parmarray = parmarray; - } - - if (phi_ref.sizrng[0] < 0) - { - /* Fail if none of the PHI's arguments resulted in updating PHI_REF - (perhaps because they have all been already visited by prior - recursive calls). */ - psnlim->leave_phi (ref); - return NULL_TREE; - } - - /* Avoid changing *THIS. */ - if (pref && pref != this) - *pref = phi_ref; - - psnlim->leave_phi (ref); - - return phi_ref.ref; -} - -/* Return the maximum amount of space remaining and if non-null, set - argument to the minimum. */ - -offset_int -access_ref::size_remaining (offset_int *pmin /* = NULL */) const -{ - offset_int minbuf; - if (!pmin) - pmin = &minbuf; - - /* add_offset() ensures the offset range isn't inverted. */ - gcc_checking_assert (offrng[0] <= offrng[1]); - - if (base0) - { - /* The offset into referenced object is zero-based (i.e., it's - not referenced by a pointer into middle of some unknown object). */ - if (offrng[0] < 0 && offrng[1] < 0) - { - /* If the offset is negative the remaining size is zero. 
*/ - *pmin = 0; - return 0; - } - - if (sizrng[1] <= offrng[0]) - { - /* If the starting offset is greater than or equal to the upper - bound on the size of the object, the space remaining is zero. - As a special case, if it's equal, set *PMIN to -1 to let - the caller know the offset is valid and just past the end. */ - *pmin = sizrng[1] == offrng[0] ? -1 : 0; - return 0; - } - - /* Otherwise return the size minus the lower bound of the offset. */ - offset_int or0 = offrng[0] < 0 ? 0 : offrng[0]; - - *pmin = sizrng[0] - or0; - return sizrng[1] - or0; - } - - /* The offset to the referenced object isn't zero-based (i.e., it may - refer to a byte other than the first. The size of such an object - is constrained only by the size of the address space (the result - of max_object_size()). */ - if (sizrng[1] <= offrng[0]) - { - *pmin = 0; - return 0; - } - - offset_int or0 = offrng[0] < 0 ? 0 : offrng[0]; - - *pmin = sizrng[0] - or0; - return sizrng[1] - or0; -} - -/* Return true if the offset and object size are in range for SIZE. */ - -bool -access_ref::offset_in_range (const offset_int &size) const -{ - if (size_remaining () < size) - return false; - - if (base0) - return offmax[0] >= 0 && offmax[1] <= sizrng[1]; - - offset_int maxoff = wi::to_offset (TYPE_MAX_VALUE (ptrdiff_type_node)); - return offmax[0] > -maxoff && offmax[1] < maxoff; -} - -/* Add the range [MIN, MAX] to the offset range. For known objects (with - zero-based offsets) at least one of whose offset's bounds is in range, - constrain the other (or both) to the bounds of the object (i.e., zero - and the upper bound of its size). This improves the quality of - diagnostics. */ - -void access_ref::add_offset (const offset_int &min, const offset_int &max) -{ - if (min <= max) - { - /* To add an ordinary range just add it to the bounds. */ - offrng[0] += min; - offrng[1] += max; - } - else if (!base0) - { - /* To add an inverted range to an offset to an unknown object - expand it to the maximum. */ - add_max_offset (); - return; - } - else - { - /* To add an inverted range to an offset to an known object set - the upper bound to the maximum representable offset value - (which may be greater than MAX_OBJECT_SIZE). - The lower bound is either the sum of the current offset and - MIN when abs(MAX) is greater than the former, or zero otherwise. - Zero because then then inverted range includes the negative of - the lower bound. */ - offset_int maxoff = wi::to_offset (TYPE_MAX_VALUE (ptrdiff_type_node)); - offrng[1] = maxoff; - - if (max >= 0) - { - offrng[0] = 0; - if (offmax[0] > 0) - offmax[0] = 0; - return; - } - - offset_int absmax = wi::abs (max); - if (offrng[0] < absmax) - { - offrng[0] += min; - /* Cap the lower bound at the upper (set to MAXOFF above) - to avoid inadvertently recreating an inverted range. */ - if (offrng[1] < offrng[0]) - offrng[0] = offrng[1]; - } - else - offrng[0] = 0; - } - - /* Set the minimum and maximmum computed so far. */ - if (offrng[1] < 0 && offrng[1] < offmax[0]) - offmax[0] = offrng[1]; - if (offrng[0] > 0 && offrng[0] > offmax[1]) - offmax[1] = offrng[0]; - - if (!base0) - return; - - /* When referencing a known object check to see if the offset computed - so far is in bounds... */ - offset_int remrng[2]; - remrng[1] = size_remaining (remrng); - if (remrng[1] > 0 || remrng[0] < 0) - { - /* ...if so, constrain it so that neither bound exceeds the size of - the object. Out of bounds offsets are left unchanged, and, for - better or worse, become in bounds later. 
They should be detected - and diagnosed at the point they first become invalid by - -Warray-bounds. */ - if (offrng[0] < 0) - offrng[0] = 0; - if (offrng[1] > sizrng[1]) - offrng[1] = sizrng[1]; - } -} - -/* Set a bit for the PHI in VISITED and return true if it wasn't - already set. */ - -bool -ssa_name_limit_t::visit_phi (tree ssa_name) -{ - if (!visited) - visited = BITMAP_ALLOC (NULL); - - /* Return false if SSA_NAME has already been visited. */ - return bitmap_set_bit (visited, SSA_NAME_VERSION (ssa_name)); -} - -/* Clear a bit for the PHI in VISITED. */ - -void -ssa_name_limit_t::leave_phi (tree ssa_name) -{ - /* Return false if SSA_NAME has already been visited. */ - bitmap_clear_bit (visited, SSA_NAME_VERSION (ssa_name)); -} - -/* Return false if the SSA_NAME chain length counter has reached - the limit, otherwise increment the counter and return true. */ - -bool -ssa_name_limit_t::next () -{ - /* Return a negative value to let caller avoid recursing beyond - the specified limit. */ - if (ssa_def_max == 0) - return false; - - --ssa_def_max; - return true; -} - -/* If the SSA_NAME has already been "seen" return a positive value. - Otherwise add it to VISITED. If the SSA_NAME limit has been - reached, return a negative value. Otherwise return zero. */ - -int -ssa_name_limit_t::next_phi (tree ssa_name) -{ - { - gimple *def_stmt = SSA_NAME_DEF_STMT (ssa_name); - /* Return a positive value if the PHI has already been visited. */ - if (gimple_code (def_stmt) == GIMPLE_PHI - && !visit_phi (ssa_name)) - return 1; - } - - /* Return a negative value to let caller avoid recursing beyond - the specified limit. */ - if (ssa_def_max == 0) - return -1; - - --ssa_def_max; - - return 0; -} - -ssa_name_limit_t::~ssa_name_limit_t () -{ - if (visited) - BITMAP_FREE (visited); -} - -/* Default ctor. Initialize object with pointers to the range_query - and cache_type instances to use or null. */ - -pointer_query::pointer_query (range_query *qry /* = NULL */, - cache_type *cache /* = NULL */) -: rvals (qry), var_cache (cache), hits (), misses (), - failures (), depth (), max_depth () -{ - /* No op. */ -} - -/* Return a pointer to the cached access_ref instance for the SSA_NAME - PTR if it's there or null otherwise. */ - -const access_ref * -pointer_query::get_ref (tree ptr, int ostype /* = 1 */) const -{ - if (!var_cache) - { - ++misses; - return NULL; - } - - unsigned version = SSA_NAME_VERSION (ptr); - unsigned idx = version << 1 | (ostype & 1); - if (var_cache->indices.length () <= idx) - { - ++misses; - return NULL; - } - - unsigned cache_idx = var_cache->indices[idx]; - if (var_cache->access_refs.length () <= cache_idx) - { - ++misses; - return NULL; - } - - access_ref &cache_ref = var_cache->access_refs[cache_idx]; - if (cache_ref.ref) - { - ++hits; - return &cache_ref; - } - - ++misses; - return NULL; -} - -/* Retrieve the access_ref instance for a variable from the cache if it's - there or compute it and insert it into the cache if it's nonnonull. */ - -bool -pointer_query::get_ref (tree ptr, access_ref *pref, int ostype /* = 1 */) -{ - const unsigned version - = TREE_CODE (ptr) == SSA_NAME ? 
-
-/* Retrieve the access_ref instance for a variable from the cache if it's
- there or compute it and insert it into the cache if it's nonnull. */
-
-bool
-pointer_query::get_ref (tree ptr, access_ref *pref, int ostype /* = 1 */)
-{
- const unsigned version
- = TREE_CODE (ptr) == SSA_NAME ? SSA_NAME_VERSION (ptr) : 0;
-
- if (var_cache && version)
- {
- unsigned idx = version << 1 | (ostype & 1);
- if (idx < var_cache->indices.length ())
- {
- unsigned cache_idx = var_cache->indices[idx] - 1;
- if (cache_idx < var_cache->access_refs.length ()
- && var_cache->access_refs[cache_idx].ref)
- {
- ++hits;
- *pref = var_cache->access_refs[cache_idx];
- return true;
- }
- }
-
- ++misses;
- }
-
- if (!compute_objsize (ptr, ostype, pref, this))
- {
- ++failures;
- return false;
- }
-
- return true;
-}
-
-/* Add a copy of the access_ref REF for the SSA_NAME to the cache if it's
- nonnull. */
-
-void
-pointer_query::put_ref (tree ptr, const access_ref &ref, int ostype /* = 1 */)
-{
- /* Only add populated/valid entries. */
- if (!var_cache || !ref.ref || ref.sizrng[0] < 0)
- return;
-
- /* Add REF to the two-level cache. */
- unsigned version = SSA_NAME_VERSION (ptr);
- unsigned idx = version << 1 | (ostype & 1);
-
- /* Grow INDICES if necessary. An index is valid if it's nonzero.
- Its value minus one is the index into ACCESS_REFS. Not all
- entries are valid. */
- if (var_cache->indices.length () <= idx)
- var_cache->indices.safe_grow_cleared (idx + 1);
-
- if (!var_cache->indices[idx])
- var_cache->indices[idx] = var_cache->access_refs.length () + 1;
-
- /* Grow ACCESS_REF cache if necessary. An entry is valid if its
- REF member is nonnull. All entries except for the last two
- are valid. Once nonnull, the REF value must stay unchanged. */
- unsigned cache_idx = var_cache->indices[idx];
- if (var_cache->access_refs.length () <= cache_idx)
- var_cache->access_refs.safe_grow_cleared (cache_idx + 1);
-
- access_ref &cache_ref = var_cache->access_refs[cache_idx - 1];
- if (cache_ref.ref)
- {
- gcc_checking_assert (cache_ref.ref == ref.ref);
- return;
- }
-
- cache_ref = ref;
-}
-
-/* Flush the cache if it's nonnull. */
-
-void
-pointer_query::flush_cache ()
-{
- if (!var_cache)
- return;
- var_cache->indices.release ();
- var_cache->access_refs.release ();
-}
-
 /* Return true if NAME starts with __builtin_ or __sync_. */
 
 static bool
@@ -1106,218 +547,6 @@ string_length (const void *ptr, unsigned eltsize, unsigned maxelts)
 return n;
 }
 
-/* For a call EXPR at LOC to a function FNAME that expects a string
- in the argument ARG, issue a diagnostic due to it being called
- with an argument that is a character array with no terminating
- NUL. SIZE is the EXACT size of the array, and BNDRNG the number
- of characters in which the NUL is expected. Either EXPR or FNAME
- may be null but not both. SIZE may be null when BNDRNG is null. */
-
-void
-warn_string_no_nul (location_t loc, tree expr, const char *fname,
- tree arg, tree decl, tree size /* = NULL_TREE */,
- bool exact /* = false */,
- const wide_int bndrng[2] /* = NULL */)
-{
- const opt_code opt = OPT_Wstringop_overread;
- if ((expr && warning_suppressed_p (expr, opt))
- || warning_suppressed_p (arg, opt))
- return;
-
- loc = expansion_point_location_if_in_system_header (loc);
- bool warned;
-
- /* Format the bound range as a string to keep the number of messages
- from exploding. 
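- For example, a bound range like [3, 7] is printed once as the
- string "[3, 7]" rather than each bound producing its own message.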
 */
- char bndstr[80];
- *bndstr = 0;
- if (bndrng)
- {
- if (bndrng[0] == bndrng[1])
- sprintf (bndstr, "%llu", (unsigned long long) bndrng[0].to_uhwi ());
- else
- sprintf (bndstr, "[%llu, %llu]",
- (unsigned long long) bndrng[0].to_uhwi (),
- (unsigned long long) bndrng[1].to_uhwi ());
- }
-
- const tree maxobjsize = max_object_size ();
- const wide_int maxsiz = wi::to_wide (maxobjsize);
- if (expr)
- {
- tree func = get_callee_fndecl (expr);
- if (bndrng)
- {
- if (wi::ltu_p (maxsiz, bndrng[0]))
- warned = warning_at (loc, opt,
- "%qD specified bound %s exceeds "
- "maximum object size %E",
- func, bndstr, maxobjsize);
- else
- {
- bool maybe = wi::to_wide (size) == bndrng[0];
- warned = warning_at (loc, opt,
- exact
- ? G_("%qD specified bound %s exceeds "
- "the size %E of unterminated array")
- : (maybe
- ? G_("%qD specified bound %s may "
- "exceed the size of at most %E "
- "of unterminated array")
- : G_("%qD specified bound %s exceeds "
- "the size of at most %E "
- "of unterminated array")),
- func, bndstr, size);
- }
- }
- else
- warned = warning_at (loc, opt,
- "%qD argument missing terminating nul",
- func);
- }
- else
- {
- if (bndrng)
- {
- if (wi::ltu_p (maxsiz, bndrng[0]))
- warned = warning_at (loc, opt,
- "%qs specified bound %s exceeds "
- "maximum object size %E",
- fname, bndstr, maxobjsize);
- else
- {
- bool maybe = wi::to_wide (size) == bndrng[0];
- warned = warning_at (loc, opt,
- exact
- ? G_("%qs specified bound %s exceeds "
- "the size %E of unterminated array")
- : (maybe
- ? G_("%qs specified bound %s may "
- "exceed the size of at most %E "
- "of unterminated array")
- : G_("%qs specified bound %s exceeds "
- "the size of at most %E "
- "of unterminated array")),
- fname, bndstr, size);
- }
- }
- else
- warned = warning_at (loc, opt,
- "%qs argument missing terminating nul",
- fname);
- }
-
- if (warned)
- {
- inform (DECL_SOURCE_LOCATION (decl),
- "referenced argument declared here");
- suppress_warning (arg, opt);
- if (expr)
- suppress_warning (expr, opt);
- }
-}
-
-/* For a call EXPR (which may be null) that expects a string argument
- SRC, returns false if SRC is a character array with no terminating
- NUL. When nonnull, BOUND is the number of characters in which to
- expect the terminating NUL. When EXPR is nonnull a warning is
- also issued. */
-
-bool
-check_nul_terminated_array (tree expr, tree src,
- tree bound /* = NULL_TREE */)
-{
- /* The constant size of the array SRC points to. The actual size
- may be less unless EXACT is true, but not more. */
- tree size;
- /* True unless SRC involves a non-constant offset into the array. */
- bool exact;
- /* The unterminated constant array SRC points to. */
- tree nonstr = unterminated_array (src, &size, &exact);
- if (!nonstr)
- return true;
-
- /* NONSTR refers to the non-nul terminated constant array and SIZE
- is the constant size of the array in bytes. EXACT is true when
- SIZE is exact. */
-
- wide_int bndrng[2];
- if (bound)
- {
- value_range r;
-
- get_global_range_query ()->range_of_expr (r, bound);
-
- if (r.kind () != VR_RANGE)
- return true;
-
- bndrng[0] = r.lower_bound ();
- bndrng[1] = r.upper_bound ();
-
- if (exact)
- {
- if (wi::leu_p (bndrng[0], wi::to_wide (size)))
- return true;
- }
- else if (wi::lt_p (bndrng[0], wi::to_wide (size), UNSIGNED))
- return true;
- }
-
- if (expr)
- warn_string_no_nul (EXPR_LOCATION (expr), expr, NULL, src, nonstr,
- size, exact, bound ? 
bndrng : NULL); - - return false; -} - -/* If EXP refers to an unterminated constant character array return - the declaration of the object of which the array is a member or - element and if SIZE is not null, set *SIZE to the size of - the unterminated array and set *EXACT if the size is exact or - clear it otherwise. Otherwise return null. */ - -tree -unterminated_array (tree exp, tree *size /* = NULL */, bool *exact /* = NULL */) -{ - /* C_STRLEN will return NULL and set DECL in the info - structure if EXP references a unterminated array. */ - c_strlen_data lendata = { }; - tree len = c_strlen (exp, 1, &lendata); - if (len == NULL_TREE && lendata.minlen && lendata.decl) - { - if (size) - { - len = lendata.minlen; - if (lendata.off) - { - /* Constant offsets are already accounted for in LENDATA.MINLEN, - but not in a SSA_NAME + CST expression. */ - if (TREE_CODE (lendata.off) == INTEGER_CST) - *exact = true; - else if (TREE_CODE (lendata.off) == PLUS_EXPR - && TREE_CODE (TREE_OPERAND (lendata.off, 1)) == INTEGER_CST) - { - /* Subtract the offset from the size of the array. */ - *exact = false; - tree temp = TREE_OPERAND (lendata.off, 1); - temp = fold_convert (ssizetype, temp); - len = fold_build2 (MINUS_EXPR, ssizetype, len, temp); - } - else - *exact = false; - } - else - *exact = true; - - *size = len; - } - return lendata.decl; - } - - return NULL_TREE; -} - /* Compute the length of a null-terminated character string or wide character string handling character sizes of 1, 2, and 4 bytes. TREE_STRING_LENGTH is not the right way because it evaluates to @@ -3969,10097 +3198,7353 @@ determine_block_size (tree len, rtx len_rtx, GET_MODE_MASK (GET_MODE (len_rtx))); } -/* Issue a warning OPT for a bounded call EXP with a bound in RANGE - accessing an object with SIZE. */ - +/* A convenience wrapper for check_access above to check access + by a read-only function like puts. */ + static bool -maybe_warn_for_bound (opt_code opt, location_t loc, tree exp, tree func, - tree bndrng[2], tree size, const access_data *pad = NULL) +check_read_access (tree exp, tree src, tree bound /* = NULL_TREE */, + int ost /* = 1 */) { - if (!bndrng[0] || warning_suppressed_p (exp, opt)) - return false; - - tree maxobjsize = max_object_size (); - - bool warned = false; - - if (opt == OPT_Wstringop_overread) - { - bool maybe = pad && pad->src.phi (); - - if (tree_int_cst_lt (maxobjsize, bndrng[0])) - { - if (bndrng[0] == bndrng[1]) - warned = (func - ? warning_at (loc, opt, - (maybe - ? G_("%qD specified bound %E may " - "exceed maximum object size %E") - : G_("%qD specified bound %E " - "exceeds maximum object size %E")), - func, bndrng[0], maxobjsize) - : warning_at (loc, opt, - (maybe - ? G_("specified bound %E may " - "exceed maximum object size %E") - : G_("specified bound %E " - "exceeds maximum object size %E")), - bndrng[0], maxobjsize)); - else - warned = (func - ? warning_at (loc, opt, - (maybe - ? G_("%qD specified bound [%E, %E] may " - "exceed maximum object size %E") - : G_("%qD specified bound [%E, %E] " - "exceeds maximum object size %E")), - func, bndrng[0], bndrng[1], maxobjsize) - : warning_at (loc, opt, - (maybe - ? G_("specified bound [%E, %E] may " - "exceed maximum object size %E") - : G_("specified bound [%E, %E] " - "exceeds maximum object size %E")), - bndrng[0], bndrng[1], maxobjsize)); - } - else if (!size || tree_int_cst_le (bndrng[0], size)) - return false; - else if (tree_int_cst_equal (bndrng[0], bndrng[1])) - warned = (func - ? warning_at (loc, opt, - (maybe - ? 
G_("%qD specified bound %E may exceed " - "source size %E") - : G_("%qD specified bound %E exceeds " - "source size %E")), - func, bndrng[0], size) - : warning_at (loc, opt, - (maybe - ? G_("specified bound %E may exceed " - "source size %E") - : G_("specified bound %E exceeds " - "source size %E")), - bndrng[0], size)); - else - warned = (func - ? warning_at (loc, opt, - (maybe - ? G_("%qD specified bound [%E, %E] may " - "exceed source size %E") - : G_("%qD specified bound [%E, %E] exceeds " - "source size %E")), - func, bndrng[0], bndrng[1], size) - : warning_at (loc, opt, - (maybe - ? G_("specified bound [%E, %E] may exceed " - "source size %E") - : G_("specified bound [%E, %E] exceeds " - "source size %E")), - bndrng[0], bndrng[1], size)); - if (warned) - { - if (pad && pad->src.ref) - { - if (DECL_P (pad->src.ref)) - inform (DECL_SOURCE_LOCATION (pad->src.ref), - "source object declared here"); - else if (EXPR_HAS_LOCATION (pad->src.ref)) - inform (EXPR_LOCATION (pad->src.ref), - "source object allocated here"); - } - suppress_warning (exp, opt); - } - - return warned; - } - - bool maybe = pad && pad->dst.phi (); - if (tree_int_cst_lt (maxobjsize, bndrng[0])) - { - if (bndrng[0] == bndrng[1]) - warned = (func - ? warning_at (loc, opt, - (maybe - ? G_("%qD specified size %E may " - "exceed maximum object size %E") - : G_("%qD specified size %E " - "exceeds maximum object size %E")), - func, bndrng[0], maxobjsize) - : warning_at (loc, opt, - (maybe - ? G_("specified size %E may exceed " - "maximum object size %E") - : G_("specified size %E exceeds " - "maximum object size %E")), - bndrng[0], maxobjsize)); - else - warned = (func - ? warning_at (loc, opt, - (maybe - ? G_("%qD specified size between %E and %E " - "may exceed maximum object size %E") - : G_("%qD specified size between %E and %E " - "exceeds maximum object size %E")), - func, bndrng[0], bndrng[1], maxobjsize) - : warning_at (loc, opt, - (maybe - ? G_("specified size between %E and %E " - "may exceed maximum object size %E") - : G_("specified size between %E and %E " - "exceeds maximum object size %E")), - bndrng[0], bndrng[1], maxobjsize)); - } - else if (!size || tree_int_cst_le (bndrng[0], size)) - return false; - else if (tree_int_cst_equal (bndrng[0], bndrng[1])) - warned = (func - ? warning_at (loc, opt, - (maybe - ? G_("%qD specified bound %E may exceed " - "destination size %E") - : G_("%qD specified bound %E exceeds " - "destination size %E")), - func, bndrng[0], size) - : warning_at (loc, opt, - (maybe - ? G_("specified bound %E may exceed " - "destination size %E") - : G_("specified bound %E exceeds " - "destination size %E")), - bndrng[0], size)); - else - warned = (func - ? warning_at (loc, opt, - (maybe - ? G_("%qD specified bound [%E, %E] may exceed " - "destination size %E") - : G_("%qD specified bound [%E, %E] exceeds " - "destination size %E")), - func, bndrng[0], bndrng[1], size) - : warning_at (loc, opt, - (maybe - ? 
G_("specified bound [%E, %E] exceeds " - "destination size %E") - : G_("specified bound [%E, %E] exceeds " - "destination size %E")), - bndrng[0], bndrng[1], size)); - - if (warned) - { - if (pad && pad->dst.ref) - { - if (DECL_P (pad->dst.ref)) - inform (DECL_SOURCE_LOCATION (pad->dst.ref), - "destination object declared here"); - else if (EXPR_HAS_LOCATION (pad->dst.ref)) - inform (EXPR_LOCATION (pad->dst.ref), - "destination object allocated here"); - } - suppress_warning (exp, opt); - } + if (!warn_stringop_overread) + return true; - return warned; + if (bound && !useless_type_conversion_p (size_type_node, TREE_TYPE (bound))) + bound = fold_convert (size_type_node, bound); + access_data data (exp, access_read_only, NULL_TREE, false, bound, true); + compute_objsize (src, ost, &data.src); + return check_access (exp, /*dstwrite=*/ NULL_TREE, /*maxread=*/ bound, + /*srcstr=*/ src, /*dstsize=*/ NULL_TREE, data.mode, + &data); } -/* For an expression EXP issue an access warning controlled by option OPT - with access to a region SIZE bytes in size in the RANGE of sizes. - WRITE is true for a write access, READ for a read access, neither for - call that may or may not perform an access but for which the range - is expected to valid. - Returns true when a warning has been issued. */ +/* Helper to determine and check the sizes of the source and the destination + of calls to __builtin_{bzero,memcpy,mempcpy,memset} calls. EXP is the + call expression, DEST is the destination argument, SRC is the source + argument or null, and LEN is the number of bytes. Use Object Size type-0 + regardless of the OPT_Wstringop_overflow_ setting. Return true on success + (no overflow or invalid sizes), false otherwise. */ static bool -warn_for_access (location_t loc, tree func, tree exp, int opt, tree range[2], - tree size, bool write, bool read, bool maybe) -{ - bool warned = false; - - if (write && read) - { - if (tree_int_cst_equal (range[0], range[1])) - warned = (func - ? warning_n (loc, opt, tree_to_uhwi (range[0]), - (maybe - ? G_("%qD may access %E byte in a region " - "of size %E") - : G_("%qD accessing %E byte in a region " - "of size %E")), - (maybe - ? G_ ("%qD may access %E bytes in a region " - "of size %E") - : G_ ("%qD accessing %E bytes in a region " - "of size %E")), - func, range[0], size) - : warning_n (loc, opt, tree_to_uhwi (range[0]), - (maybe - ? G_("may access %E byte in a region " - "of size %E") - : G_("accessing %E byte in a region " - "of size %E")), - (maybe - ? G_("may access %E bytes in a region " - "of size %E") - : G_("accessing %E bytes in a region " - "of size %E")), - range[0], size)); - else if (tree_int_cst_sign_bit (range[1])) - { - /* Avoid printing the upper bound if it's invalid. */ - warned = (func - ? warning_at (loc, opt, - (maybe - ? G_("%qD may access %E or more bytes " - "in a region of size %E") - : G_("%qD accessing %E or more bytes " - "in a region of size %E")), - func, range[0], size) - : warning_at (loc, opt, - (maybe - ? G_("may access %E or more bytes " - "in a region of size %E") - : G_("accessing %E or more bytes " - "in a region of size %E")), - range[0], size)); - } - else - warned = (func - ? warning_at (loc, opt, - (maybe - ? G_("%qD may access between %E and %E " - "bytes in a region of size %E") - : G_("%qD accessing between %E and %E " - "bytes in a region of size %E")), - func, range[0], range[1], size) - : warning_at (loc, opt, - (maybe - ? 
G_("may access between %E and %E bytes " - "in a region of size %E") - : G_("accessing between %E and %E bytes " - "in a region of size %E")), - range[0], range[1], size)); - return warned; - } - - if (write) - { - if (tree_int_cst_equal (range[0], range[1])) - warned = (func - ? warning_n (loc, opt, tree_to_uhwi (range[0]), - (maybe - ? G_("%qD may write %E byte into a region " - "of size %E") - : G_("%qD writing %E byte into a region " - "of size %E overflows the destination")), - (maybe - ? G_("%qD may write %E bytes into a region " - "of size %E") - : G_("%qD writing %E bytes into a region " - "of size %E overflows the destination")), - func, range[0], size) - : warning_n (loc, opt, tree_to_uhwi (range[0]), - (maybe - ? G_("may write %E byte into a region " - "of size %E") - : G_("writing %E byte into a region " - "of size %E overflows the destination")), - (maybe - ? G_("may write %E bytes into a region " - "of size %E") - : G_("writing %E bytes into a region " - "of size %E overflows the destination")), - range[0], size)); - else if (tree_int_cst_sign_bit (range[1])) - { - /* Avoid printing the upper bound if it's invalid. */ - warned = (func - ? warning_at (loc, opt, - (maybe - ? G_("%qD may write %E or more bytes " - "into a region of size %E") - : G_("%qD writing %E or more bytes " - "into a region of size %E overflows " - "the destination")), - func, range[0], size) - : warning_at (loc, opt, - (maybe - ? G_("may write %E or more bytes into " - "a region of size %E") - : G_("writing %E or more bytes into " - "a region of size %E overflows " - "the destination")), - range[0], size)); - } - else - warned = (func - ? warning_at (loc, opt, - (maybe - ? G_("%qD may write between %E and %E bytes " - "into a region of size %E") - : G_("%qD writing between %E and %E bytes " - "into a region of size %E overflows " - "the destination")), - func, range[0], range[1], size) - : warning_at (loc, opt, - (maybe - ? G_("may write between %E and %E bytes " - "into a region of size %E") - : G_("writing between %E and %E bytes " - "into a region of size %E overflows " - "the destination")), - range[0], range[1], size)); - return warned; - } - - if (read) - { - if (tree_int_cst_equal (range[0], range[1])) - warned = (func - ? warning_n (loc, OPT_Wstringop_overread, - tree_to_uhwi (range[0]), - (maybe - ? G_("%qD may read %E byte from a region " - "of size %E") - : G_("%qD reading %E byte from a region " - "of size %E")), - (maybe - ? G_("%qD may read %E bytes from a region " - "of size %E") - : G_("%qD reading %E bytes from a region " - "of size %E")), - func, range[0], size) - : warning_n (loc, OPT_Wstringop_overread, - tree_to_uhwi (range[0]), - (maybe - ? G_("may read %E byte from a region " - "of size %E") - : G_("reading %E byte from a region " - "of size %E")), - (maybe - ? G_("may read %E bytes from a region " - "of size %E") - : G_("reading %E bytes from a region " - "of size %E")), - range[0], size)); - else if (tree_int_cst_sign_bit (range[1])) - { - /* Avoid printing the upper bound if it's invalid. */ - warned = (func - ? warning_at (loc, OPT_Wstringop_overread, - (maybe - ? G_("%qD may read %E or more bytes " - "from a region of size %E") - : G_("%qD reading %E or more bytes " - "from a region of size %E")), - func, range[0], size) - : warning_at (loc, OPT_Wstringop_overread, - (maybe - ? G_("may read %E or more bytes " - "from a region of size %E") - : G_("reading %E or more bytes " - "from a region of size %E")), - range[0], size)); - } - else - warned = (func - ? 
warning_at (loc, OPT_Wstringop_overread, - (maybe - ? G_("%qD may read between %E and %E bytes " - "from a region of size %E") - : G_("%qD reading between %E and %E bytes " - "from a region of size %E")), - func, range[0], range[1], size) - : warning_at (loc, opt, - (maybe - ? G_("may read between %E and %E bytes " - "from a region of size %E") - : G_("reading between %E and %E bytes " - "from a region of size %E")), - range[0], range[1], size)); - - if (warned) - suppress_warning (exp, OPT_Wstringop_overread); - - return warned; - } - - if (tree_int_cst_equal (range[0], range[1]) - || tree_int_cst_sign_bit (range[1])) - warned = (func - ? warning_n (loc, OPT_Wstringop_overread, - tree_to_uhwi (range[0]), - "%qD expecting %E byte in a region of size %E", - "%qD expecting %E bytes in a region of size %E", - func, range[0], size) - : warning_n (loc, OPT_Wstringop_overread, - tree_to_uhwi (range[0]), - "expecting %E byte in a region of size %E", - "expecting %E bytes in a region of size %E", - range[0], size)); - else if (tree_int_cst_sign_bit (range[1])) - { - /* Avoid printing the upper bound if it's invalid. */ - warned = (func - ? warning_at (loc, OPT_Wstringop_overread, - "%qD expecting %E or more bytes in a region " - "of size %E", - func, range[0], size) - : warning_at (loc, OPT_Wstringop_overread, - "expecting %E or more bytes in a region " - "of size %E", - range[0], size)); - } - else - warned = (func - ? warning_at (loc, OPT_Wstringop_overread, - "%qD expecting between %E and %E bytes in " - "a region of size %E", - func, range[0], range[1], size) - : warning_at (loc, OPT_Wstringop_overread, - "expecting between %E and %E bytes in " - "a region of size %E", - range[0], range[1], size)); - - if (warned) - suppress_warning (exp, OPT_Wstringop_overread); +check_memop_access (tree exp, tree dest, tree src, tree size) +{ + /* For functions like memset and memcpy that operate on raw memory + try to determine the size of the largest source and destination + object using type-0 Object Size regardless of the object size + type specified by the option. */ + access_data data (exp, access_read_write); + tree srcsize = src ? compute_objsize (src, 0, &data.src) : NULL_TREE; + tree dstsize = compute_objsize (dest, 0, &data.dst); - return warned; + return check_access (exp, size, /*maxread=*/NULL_TREE, + srcsize, dstsize, data.mode, &data); } -/* Issue one inform message describing each target of an access REF. - WRITE is set for a write access and clear for a read access. */ +/* Validate memchr arguments without performing any expansion. + Return NULL_RTX. */ -void -access_ref::inform_access (access_mode mode) const +static rtx +expand_builtin_memchr (tree exp, rtx) { - const access_ref &aref = *this; - if (!aref.ref) - return; - - if (aref.phi ()) - { - /* Set MAXREF to refer to the largest object and fill ALL_REFS - with data for all objects referenced by the PHI arguments. */ - access_ref maxref; - auto_vec all_refs; - if (!get_ref (&all_refs, &maxref)) - return; - - /* Except for MAXREF, the rest of the arguments' offsets need not - reflect one added to the PHI itself. Determine the latter from - MAXREF on which the result is based. */ - const offset_int orng[] = - { - offrng[0] - maxref.offrng[0], - wi::smax (offrng[1] - maxref.offrng[1], offrng[0]), - }; - - /* Add the final PHI's offset to that of each of the arguments - and recurse to issue an inform message for it. */ - for (unsigned i = 0; i != all_refs.length (); ++i) - { - /* Skip any PHIs; those could lead to infinite recursion. 
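- (A PHI argument can transitively reach this same PHI again, so
- recursing into nested PHIs from here could cycle forever.)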
*/ - if (all_refs[i].phi ()) - continue; - - all_refs[i].add_offset (orng[0], orng[1]); - all_refs[i].inform_access (mode); - } - return; - } - - /* Convert offset range and avoid including a zero range since it - isn't necessarily meaningful. */ - HOST_WIDE_INT diff_min = tree_to_shwi (TYPE_MIN_VALUE (ptrdiff_type_node)); - HOST_WIDE_INT diff_max = tree_to_shwi (TYPE_MAX_VALUE (ptrdiff_type_node)); - HOST_WIDE_INT minoff; - HOST_WIDE_INT maxoff = diff_max; - if (wi::fits_shwi_p (aref.offrng[0])) - minoff = aref.offrng[0].to_shwi (); - else - minoff = aref.offrng[0] < 0 ? diff_min : diff_max; - - if (wi::fits_shwi_p (aref.offrng[1])) - maxoff = aref.offrng[1].to_shwi (); - - if (maxoff <= diff_min || maxoff >= diff_max) - /* Avoid mentioning an upper bound that's equal to or in excess - of the maximum of ptrdiff_t. */ - maxoff = minoff; + if (!validate_arglist (exp, + POINTER_TYPE, INTEGER_TYPE, INTEGER_TYPE, VOID_TYPE)) + return NULL_RTX; - /* Convert size range and always include it since all sizes are - meaningful. */ - unsigned long long minsize = 0, maxsize = 0; - if (wi::fits_shwi_p (aref.sizrng[0]) - && wi::fits_shwi_p (aref.sizrng[1])) - { - minsize = aref.sizrng[0].to_shwi (); - maxsize = aref.sizrng[1].to_shwi (); - } + tree arg1 = CALL_EXPR_ARG (exp, 0); + tree len = CALL_EXPR_ARG (exp, 2); - /* SIZRNG doesn't necessarily have the same range as the allocation - size determined by gimple_call_alloc_size (). */ - char sizestr[80]; - if (minsize == maxsize) - sprintf (sizestr, "%llu", minsize); - else - sprintf (sizestr, "[%llu, %llu]", minsize, maxsize); - - char offstr[80]; - if (minoff == 0 - && (maxoff == 0 || aref.sizrng[1] <= maxoff)) - offstr[0] = '\0'; - else if (minoff == maxoff) - sprintf (offstr, "%lli", (long long) minoff); - else - sprintf (offstr, "[%lli, %lli]", (long long) minoff, (long long) maxoff); + check_read_access (exp, arg1, len, 0); - location_t loc = UNKNOWN_LOCATION; + return NULL_RTX; +} - tree ref = this->ref; - tree allocfn = NULL_TREE; - if (TREE_CODE (ref) == SSA_NAME) - { - gimple *stmt = SSA_NAME_DEF_STMT (ref); - if (is_gimple_call (stmt)) - { - loc = gimple_location (stmt); - if (gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN)) - { - /* Strip the SSA_NAME suffix from the variable name and - recreate an identifier with the VLA's original name. */ - ref = gimple_call_lhs (stmt); - if (SSA_NAME_IDENTIFIER (ref)) - { - ref = SSA_NAME_IDENTIFIER (ref); - const char *id = IDENTIFIER_POINTER (ref); - size_t len = strcspn (id, ".$"); - if (!len) - len = strlen (id); - ref = get_identifier_with_length (id, len); - } - } - else - { - /* Except for VLAs, retrieve the allocation function. */ - allocfn = gimple_call_fndecl (stmt); - if (!allocfn) - allocfn = gimple_call_fn (stmt); - if (TREE_CODE (allocfn) == SSA_NAME) - { - /* For an ALLOC_CALL via a function pointer make a small - effort to determine the destination of the pointer. */ - gimple *def = SSA_NAME_DEF_STMT (allocfn); - if (gimple_assign_single_p (def)) - { - tree rhs = gimple_assign_rhs1 (def); - if (DECL_P (rhs)) - allocfn = rhs; - else if (TREE_CODE (rhs) == COMPONENT_REF) - allocfn = TREE_OPERAND (rhs, 1); - } - } - } - } - else if (gimple_nop_p (stmt)) - /* Handle DECL_PARM below. */ - ref = SSA_NAME_VAR (ref); - } +/* Expand a call EXP to the memcpy builtin. + Return NULL_RTX if we failed, the caller should emit a normal call, + otherwise try to get the result in TARGET, if convenient (and in + mode MODE if that's convenient). 
*/ - if (DECL_P (ref)) - loc = DECL_SOURCE_LOCATION (ref); - else if (EXPR_P (ref) && EXPR_HAS_LOCATION (ref)) - loc = EXPR_LOCATION (ref); - else if (TREE_CODE (ref) != IDENTIFIER_NODE - && TREE_CODE (ref) != SSA_NAME) - return; +static rtx +expand_builtin_memcpy (tree exp, rtx target) +{ + if (!validate_arglist (exp, + POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) + return NULL_RTX; - if (mode == access_read_write || mode == access_write_only) - { - if (allocfn == NULL_TREE) - { - if (*offstr) - inform (loc, "at offset %s into destination object %qE of size %s", - offstr, ref, sizestr); - else - inform (loc, "destination object %qE of size %s", ref, sizestr); - return; - } + tree dest = CALL_EXPR_ARG (exp, 0); + tree src = CALL_EXPR_ARG (exp, 1); + tree len = CALL_EXPR_ARG (exp, 2); - if (*offstr) - inform (loc, - "at offset %s into destination object of size %s " - "allocated by %qE", offstr, sizestr, allocfn); - else - inform (loc, "destination object of size %s allocated by %qE", - sizestr, allocfn); - return; - } + check_memop_access (exp, dest, src, len); - if (mode == access_read_only) - { - if (allocfn == NULL_TREE) - { - if (*offstr) - inform (loc, "at offset %s into source object %qE of size %s", - offstr, ref, sizestr); - else - inform (loc, "source object %qE of size %s", ref, sizestr); + return expand_builtin_memory_copy_args (dest, src, len, target, exp, + /*retmode=*/ RETURN_BEGIN, false); +} - return; - } +/* Check a call EXP to the memmove built-in for validity. + Return NULL_RTX on both success and failure. */ - if (*offstr) - inform (loc, - "at offset %s into source object of size %s allocated by %qE", - offstr, sizestr, allocfn); - else - inform (loc, "source object of size %s allocated by %qE", - sizestr, allocfn); - return; - } +static rtx +expand_builtin_memmove (tree exp, rtx target) +{ + if (!validate_arglist (exp, + POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) + return NULL_RTX; - if (allocfn == NULL_TREE) - { - if (*offstr) - inform (loc, "at offset %s into object %qE of size %s", - offstr, ref, sizestr); - else - inform (loc, "object %qE of size %s", ref, sizestr); + tree dest = CALL_EXPR_ARG (exp, 0); + tree src = CALL_EXPR_ARG (exp, 1); + tree len = CALL_EXPR_ARG (exp, 2); - return; - } + check_memop_access (exp, dest, src, len); - if (*offstr) - inform (loc, - "at offset %s into object of size %s allocated by %qE", - offstr, sizestr, allocfn); - else - inform (loc, "object of size %s allocated by %qE", - sizestr, allocfn); + return expand_builtin_memory_copy_args (dest, src, len, target, exp, + /*retmode=*/ RETURN_BEGIN, true); } -/* Helper to set RANGE to the range of BOUND if it's nonnull, bounded - by BNDRNG if nonnull and valid. */ +/* Expand a call EXP to the mempcpy builtin. + Return NULL_RTX if we failed; the caller should emit a normal call, + otherwise try to get the result in TARGET, if convenient (and in + mode MODE if that's convenient). 
*/ -static void -get_size_range (tree bound, tree range[2], const offset_int bndrng[2]) +static rtx +expand_builtin_mempcpy (tree exp, rtx target) { - if (bound) - get_size_range (bound, range); - - if (!bndrng || (bndrng[0] == 0 && bndrng[1] == HOST_WIDE_INT_M1U)) - return; + if (!validate_arglist (exp, + POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) + return NULL_RTX; - if (range[0] && TREE_CODE (range[0]) == INTEGER_CST) - { - offset_int r[] = - { wi::to_offset (range[0]), wi::to_offset (range[1]) }; - if (r[0] < bndrng[0]) - range[0] = wide_int_to_tree (sizetype, bndrng[0]); - if (bndrng[1] < r[1]) - range[1] = wide_int_to_tree (sizetype, bndrng[1]); - } - else - { - range[0] = wide_int_to_tree (sizetype, bndrng[0]); - range[1] = wide_int_to_tree (sizetype, bndrng[1]); - } -} + tree dest = CALL_EXPR_ARG (exp, 0); + tree src = CALL_EXPR_ARG (exp, 1); + tree len = CALL_EXPR_ARG (exp, 2); -/* Try to verify that the sizes and lengths of the arguments to a string - manipulation function given by EXP are within valid bounds and that - the operation does not lead to buffer overflow or read past the end. - Arguments other than EXP may be null. When non-null, the arguments - have the following meaning: - DST is the destination of a copy call or NULL otherwise. - SRC is the source of a copy call or NULL otherwise. - DSTWRITE is the number of bytes written into the destination obtained - from the user-supplied size argument to the function (such as in - memcpy(DST, SRCs, DSTWRITE) or strncpy(DST, DRC, DSTWRITE). - MAXREAD is the user-supplied bound on the length of the source sequence - (such as in strncat(d, s, N). It specifies the upper limit on the number - of bytes to write. If NULL, it's taken to be the same as DSTWRITE. - SRCSTR is the source string (such as in strcpy(DST, SRC)) when the - expression EXP is a string function call (as opposed to a memory call - like memcpy). As an exception, SRCSTR can also be an integer denoting - the precomputed size of the source string or object (for functions like - memcpy). - DSTSIZE is the size of the destination object. + /* Policy does not generally allow using compute_objsize (which + is used internally by check_memop_size) to change code generation + or drive optimization decisions. - When DSTWRITE is null LEN is checked to verify that it doesn't exceed - SIZE_MAX. + In this instance it is safe because the code we generate has + the same semantics regardless of the return value of + check_memop_sizes. Exactly the same amount of data is copied + and the return value is exactly the same in both cases. - WRITE is true for write accesses, READ is true for reads. Both are - false for simple size checks in calls to functions that neither read - from nor write to the region. + Furthermore, check_memop_size always uses mode 0 for the call to + compute_objsize, so the imprecise nature of compute_objsize is + avoided. */ - When nonnull, PAD points to a more detailed description of the access. + /* Avoid expanding mempcpy into memcpy when the call is determined + to overflow the buffer. This also prevents the same overflow + from being diagnosed again when expanding memcpy. */ + if (!check_memop_access (exp, dest, src, len)) + return NULL_RTX; - If the call is successfully verified as safe return true, otherwise - return false. 
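-
- For example, in strncpy (D, S, N) the access is checked with DSTWRITE
- set to N and SRCSTR to S; writes that overflow DSTSIZE are diagnosed
- with -Wstringop-overflow and reads past the end of the source with
- -Wstringop-overread.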
*/ + return expand_builtin_mempcpy_args (dest, src, len, + target, exp, /*retmode=*/ RETURN_END); +} -bool -check_access (tree exp, tree dstwrite, - tree maxread, tree srcstr, tree dstsize, - access_mode mode, const access_data *pad /* = NULL */) -{ - /* The size of the largest object is half the address space, or - PTRDIFF_MAX. (This is way too permissive.) */ - tree maxobjsize = max_object_size (); - - /* Either an approximate/minimum the length of the source string for - string functions or the size of the source object for raw memory - functions. */ - tree slen = NULL_TREE; - - /* The range of the access in bytes; first set to the write access - for functions that write and then read for those that also (or - just) read. */ - tree range[2] = { NULL_TREE, NULL_TREE }; - - /* Set to true when the exact number of bytes written by a string - function like strcpy is not known and the only thing that is - known is that it must be at least one (for the terminating nul). */ - bool at_least_one = false; - if (srcstr) - { - /* SRCSTR is normally a pointer to string but as a special case - it can be an integer denoting the length of a string. */ - if (POINTER_TYPE_P (TREE_TYPE (srcstr))) - { - if (!check_nul_terminated_array (exp, srcstr, maxread)) - return false; - /* Try to determine the range of lengths the source string - refers to. If it can be determined and is less than - the upper bound given by MAXREAD add one to it for - the terminating nul. Otherwise, set it to one for - the same reason, or to MAXREAD as appropriate. */ - c_strlen_data lendata = { }; - get_range_strlen (srcstr, &lendata, /* eltsize = */ 1); - range[0] = lendata.minlen; - range[1] = lendata.maxbound ? lendata.maxbound : lendata.maxlen; - if (range[0] - && TREE_CODE (range[0]) == INTEGER_CST - && TREE_CODE (range[1]) == INTEGER_CST - && (!maxread || TREE_CODE (maxread) == INTEGER_CST)) - { - if (maxread && tree_int_cst_le (maxread, range[0])) - range[0] = range[1] = maxread; - else - range[0] = fold_build2 (PLUS_EXPR, size_type_node, - range[0], size_one_node); +/* Helper function to do the actual work for expand of memory copy family + functions (memcpy, mempcpy, stpcpy). Expansing should assign LEN bytes + of memory from SRC to DEST and assign to TARGET if convenient. Return + value is based on RETMODE argument. */ - if (maxread && tree_int_cst_le (maxread, range[1])) - range[1] = maxread; - else if (!integer_all_onesp (range[1])) - range[1] = fold_build2 (PLUS_EXPR, size_type_node, - range[1], size_one_node); +static rtx +expand_builtin_memory_copy_args (tree dest, tree src, tree len, + rtx target, tree exp, memop_ret retmode, + bool might_overlap) +{ + unsigned int src_align = get_pointer_alignment (src); + unsigned int dest_align = get_pointer_alignment (dest); + rtx dest_mem, src_mem, dest_addr, len_rtx; + HOST_WIDE_INT expected_size = -1; + unsigned int expected_align = 0; + unsigned HOST_WIDE_INT min_size; + unsigned HOST_WIDE_INT max_size; + unsigned HOST_WIDE_INT probable_max_size; - slen = range[0]; - } - else - { - at_least_one = true; - slen = size_one_node; - } - } - else - slen = srcstr; - } + bool is_move_done; - if (!dstwrite && !maxread) - { - /* When the only available piece of data is the object size - there is nothing to do. */ - if (!slen) - return true; + /* If DEST is not a pointer type, call the normal function. */ + if (dest_align == 0) + return NULL_RTX; - /* Otherwise, when the length of the source sequence is known - (as with strlen), set DSTWRITE to it. 
*/ - if (!range[0]) - dstwrite = slen; - } + /* If either SRC is not a pointer type, don't do this + operation in-line. */ + if (src_align == 0) + return NULL_RTX; - if (!dstsize) - dstsize = maxobjsize; + if (currently_expanding_gimple_stmt) + stringop_block_profile (currently_expanding_gimple_stmt, + &expected_align, &expected_size); - /* Set RANGE to that of DSTWRITE if non-null, bounded by PAD->DST.BNDRNG - if valid. */ - get_size_range (dstwrite, range, pad ? pad->dst.bndrng : NULL); + if (expected_align < dest_align) + expected_align = dest_align; + dest_mem = get_memory_rtx (dest, len); + set_mem_align (dest_mem, dest_align); + len_rtx = expand_normal (len); + determine_block_size (len, len_rtx, &min_size, &max_size, + &probable_max_size); - tree func = get_callee_fndecl (exp); - /* Read vs write access by built-ins can be determined from the const - qualifiers on the pointer argument. In the absence of attribute - access, non-const qualified pointer arguments to user-defined - functions are assumed to both read and write the objects. */ - const bool builtin = func ? fndecl_built_in_p (func) : false; + /* Try to get the byte representation of the constant SRC points to, + with its byte size in NBYTES. */ + unsigned HOST_WIDE_INT nbytes; + const char *rep = getbyterep (src, &nbytes); - /* First check the number of bytes to be written against the maximum - object size. */ - if (range[0] - && TREE_CODE (range[0]) == INTEGER_CST - && tree_int_cst_lt (maxobjsize, range[0])) - { - location_t loc = EXPR_LOCATION (exp); - maybe_warn_for_bound (OPT_Wstringop_overflow_, loc, exp, func, range, - NULL_TREE, pad); - return false; - } - - /* The number of bytes to write is "exact" if DSTWRITE is non-null, - constant, and in range of unsigned HOST_WIDE_INT. */ - bool exactwrite = dstwrite && tree_fits_uhwi_p (dstwrite); - - /* Next check the number of bytes to be written against the destination - object size. */ - if (range[0] || !exactwrite || integer_all_onesp (dstwrite)) + /* If the function's constant bound LEN_RTX is less than or equal + to the byte size of the representation of the constant argument, + and if block move would be done by pieces, we can avoid loading + the bytes from memory and only store the computed constant. + This works in the overlap (memmove) case as well because + store_by_pieces just generates a series of stores of constants + from the representation returned by getbyterep(). */ + if (rep + && CONST_INT_P (len_rtx) + && (unsigned HOST_WIDE_INT) INTVAL (len_rtx) <= nbytes + && can_store_by_pieces (INTVAL (len_rtx), builtin_memcpy_read_str, + CONST_CAST (char *, rep), + dest_align, false)) { - if (range[0] - && TREE_CODE (range[0]) == INTEGER_CST - && ((tree_fits_uhwi_p (dstsize) - && tree_int_cst_lt (dstsize, range[0])) - || (dstwrite - && tree_fits_uhwi_p (dstwrite) - && tree_int_cst_lt (dstwrite, range[0])))) - { - const opt_code opt = OPT_Wstringop_overflow_; - if (warning_suppressed_p (exp, opt) - || (pad && pad->dst.ref - && warning_suppressed_p (pad->dst.ref, opt))) - return false; - - location_t loc = EXPR_LOCATION (exp); - bool warned = false; - if (dstwrite == slen && at_least_one) - { - /* This is a call to strcpy with a destination of 0 size - and a source of unknown length. The call will write - at least one byte past the end of the destination. */ - warned = (func - ? 
warning_at (loc, opt, - "%qD writing %E or more bytes into " - "a region of size %E overflows " - "the destination", - func, range[0], dstsize) - : warning_at (loc, opt, - "writing %E or more bytes into " - "a region of size %E overflows " - "the destination", - range[0], dstsize)); - } - else - { - const bool read - = mode == access_read_only || mode == access_read_write; - const bool write - = mode == access_write_only || mode == access_read_write; - const bool maybe = pad && pad->dst.parmarray; - warned = warn_for_access (loc, func, exp, - OPT_Wstringop_overflow_, - range, dstsize, - write, read && !builtin, maybe); - } - - if (warned) - { - suppress_warning (exp, OPT_Wstringop_overflow_); - if (pad) - pad->dst.inform_access (pad->mode); - } - - /* Return error when an overflow has been detected. */ - return false; - } + dest_mem = store_by_pieces (dest_mem, INTVAL (len_rtx), + builtin_memcpy_read_str, + CONST_CAST (char *, rep), + dest_align, false, retmode); + dest_mem = force_operand (XEXP (dest_mem, 0), target); + dest_mem = convert_memory_address (ptr_mode, dest_mem); + return dest_mem; } - /* Check the maximum length of the source sequence against the size - of the destination object if known, or against the maximum size - of an object. */ - if (maxread) - { - /* Set RANGE to that of MAXREAD, bounded by PAD->SRC.BNDRNG if - PAD is nonnull and BNDRNG is valid. */ - get_size_range (maxread, range, pad ? pad->src.bndrng : NULL); + src_mem = get_memory_rtx (src, len); + set_mem_align (src_mem, src_align); - location_t loc = EXPR_LOCATION (exp); - tree size = dstsize; - if (pad && pad->mode == access_read_only) - size = wide_int_to_tree (sizetype, pad->src.sizrng[1]); + /* Copy word part most expediently. */ + enum block_op_methods method = BLOCK_OP_NORMAL; + if (CALL_EXPR_TAILCALL (exp) + && (retmode == RETURN_BEGIN || target == const0_rtx)) + method = BLOCK_OP_TAILCALL; + bool use_mempcpy_call = (targetm.libc_has_fast_function (BUILT_IN_MEMPCPY) + && retmode == RETURN_END + && !might_overlap + && target != const0_rtx); + if (use_mempcpy_call) + method = BLOCK_OP_NO_LIBCALL_RET; + dest_addr = emit_block_move_hints (dest_mem, src_mem, len_rtx, method, + expected_align, expected_size, + min_size, max_size, probable_max_size, + use_mempcpy_call, &is_move_done, + might_overlap); - if (range[0] && maxread && tree_fits_uhwi_p (size)) - { - if (tree_int_cst_lt (maxobjsize, range[0])) - { - maybe_warn_for_bound (OPT_Wstringop_overread, loc, exp, func, - range, size, pad); - return false; - } + /* Bail out when a mempcpy call would be expanded as libcall and when + we have a target that provides a fast implementation + of mempcpy routine. */ + if (!is_move_done) + return NULL_RTX; - if (size != maxobjsize && tree_int_cst_lt (size, range[0])) - { - opt_code opt = (dstwrite || mode != access_read_only - ? OPT_Wstringop_overflow_ - : OPT_Wstringop_overread); - maybe_warn_for_bound (opt, loc, exp, func, range, size, pad); - return false; - } - } + if (dest_addr == pc_rtx) + return NULL_RTX; - maybe_warn_nonstring_arg (func, exp); - } - - /* Check for reading past the end of SRC. */ - bool overread = (slen - && slen == srcstr - && dstwrite - && range[0] - && TREE_CODE (slen) == INTEGER_CST - && tree_int_cst_lt (slen, range[0])); - /* If none is determined try to get a better answer based on the details - in PAD. 
*/ - if (!overread - && pad - && pad->src.sizrng[1] >= 0 - && pad->src.offrng[0] >= 0 - && (pad->src.offrng[1] < 0 - || pad->src.offrng[0] <= pad->src.offrng[1])) - { - /* Set RANGE to that of MAXREAD, bounded by PAD->SRC.BNDRNG if - PAD is nonnull and BNDRNG is valid. */ - get_size_range (maxread, range, pad ? pad->src.bndrng : NULL); - /* Set OVERREAD for reads starting just past the end of an object. */ - overread = pad->src.sizrng[1] - pad->src.offrng[0] < pad->src.bndrng[0]; - range[0] = wide_int_to_tree (sizetype, pad->src.bndrng[0]); - slen = size_zero_node; - } - - if (overread) - { - const opt_code opt = OPT_Wstringop_overread; - if (warning_suppressed_p (exp, opt) - || (srcstr && warning_suppressed_p (srcstr, opt)) - || (pad && pad->src.ref - && warning_suppressed_p (pad->src.ref, opt))) - return false; + if (dest_addr == 0) + { + dest_addr = force_operand (XEXP (dest_mem, 0), target); + dest_addr = convert_memory_address (ptr_mode, dest_addr); + } - location_t loc = EXPR_LOCATION (exp); - const bool read - = mode == access_read_only || mode == access_read_write; - const bool maybe = pad && pad->dst.parmarray; - if (warn_for_access (loc, func, exp, opt, range, slen, false, read, - maybe)) - { - suppress_warning (exp, opt); - if (pad) - pad->src.inform_access (access_read_only); - } - return false; + if (retmode != RETURN_BEGIN && target != const0_rtx) + { + dest_addr = gen_rtx_PLUS (ptr_mode, dest_addr, len_rtx); + /* stpcpy pointer to last byte. */ + if (retmode == RETURN_END_MINUS_ONE) + dest_addr = gen_rtx_MINUS (ptr_mode, dest_addr, const1_rtx); } - return true; + return dest_addr; } -/* A convenience wrapper for check_access above to check access - by a read-only function like puts. */ - -static bool -check_read_access (tree exp, tree src, tree bound /* = NULL_TREE */, - int ost /* = 1 */) +static rtx +expand_builtin_mempcpy_args (tree dest, tree src, tree len, + rtx target, tree orig_exp, memop_ret retmode) { - if (!warn_stringop_overread) - return true; - - if (bound && !useless_type_conversion_p (size_type_node, TREE_TYPE (bound))) - bound = fold_convert (size_type_node, bound); - access_data data (exp, access_read_only, NULL_TREE, false, bound, true); - compute_objsize (src, ost, &data.src); - return check_access (exp, /*dstwrite=*/ NULL_TREE, /*maxread=*/ bound, - /*srcstr=*/ src, /*dstsize=*/ NULL_TREE, data.mode, - &data); + return expand_builtin_memory_copy_args (dest, src, len, target, orig_exp, + retmode, false); } -/* If STMT is a call to an allocation function, returns the constant - maximum size of the object allocated by the call represented as - sizetype. If nonnull, sets RNG1[] to the range of the size. - When nonnull, uses RVALS for range information, otherwise gets global - range info. - Returns null when STMT is not a call to a valid allocation function. */ +/* Expand into a movstr instruction, if one is available. Return NULL_RTX if + we failed, the caller should emit a normal call, otherwise try to + get the result in TARGET, if convenient. + Return value is based on RETMODE argument. 
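+ (RETURN_BEGIN yields the destination pointer itself,
+ RETURN_END_MINUS_ONE the address of the terminating nul that movstr
+ computes, and RETURN_END that address plus one, the mempcpy-style
+ result; see the adjustment at the end of this function.)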
*/ -tree -gimple_call_alloc_size (gimple *stmt, wide_int rng1[2] /* = NULL */, - range_query * /* = NULL */) +static rtx +expand_movstr (tree dest, tree src, rtx target, memop_ret retmode) { - if (!stmt || !is_gimple_call (stmt)) - return NULL_TREE; - - tree allocfntype; - if (tree fndecl = gimple_call_fndecl (stmt)) - allocfntype = TREE_TYPE (fndecl); - else - allocfntype = gimple_call_fntype (stmt); + class expand_operand ops[3]; + rtx dest_mem; + rtx src_mem; - if (!allocfntype) - return NULL_TREE; + if (!targetm.have_movstr ()) + return NULL_RTX; - unsigned argidx1 = UINT_MAX, argidx2 = UINT_MAX; - tree at = lookup_attribute ("alloc_size", TYPE_ATTRIBUTES (allocfntype)); - if (!at) + dest_mem = get_memory_rtx (dest, NULL); + src_mem = get_memory_rtx (src, NULL); + if (retmode == RETURN_BEGIN) { - if (!gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN)) - return NULL_TREE; - - argidx1 = 0; + target = force_reg (Pmode, XEXP (dest_mem, 0)); + dest_mem = replace_equiv_address (dest_mem, target); } - unsigned nargs = gimple_call_num_args (stmt); + create_output_operand (&ops[0], + retmode != RETURN_BEGIN ? target : NULL_RTX, Pmode); + create_fixed_operand (&ops[1], dest_mem); + create_fixed_operand (&ops[2], src_mem); + if (!maybe_expand_insn (targetm.code_for_movstr, 3, ops)) + return NULL_RTX; - if (argidx1 == UINT_MAX) + if (retmode != RETURN_BEGIN && target != const0_rtx) { - tree atval = TREE_VALUE (at); - if (!atval) - return NULL_TREE; - - argidx1 = TREE_INT_CST_LOW (TREE_VALUE (atval)) - 1; - if (nargs <= argidx1) - return NULL_TREE; - - atval = TREE_CHAIN (atval); - if (atval) + target = ops[0].value; + /* movstr is supposed to set end to the address of the NUL + terminator. If the caller requested a mempcpy-like return value, + adjust it. */ + if (retmode == RETURN_END) { - argidx2 = TREE_INT_CST_LOW (TREE_VALUE (atval)) - 1; - if (nargs <= argidx2) - return NULL_TREE; + rtx tem = plus_constant (GET_MODE (target), + gen_lowpart (GET_MODE (target), target), 1); + emit_move_insn (target, force_operand (tem, NULL_RTX)); } } + return target; +} - tree size = gimple_call_arg (stmt, argidx1); - - wide_int rng1_buf[2]; - /* If RNG1 is not set, use the buffer. */ - if (!rng1) - rng1 = rng1_buf; - - /* Use maximum precision to avoid overflow below. */ - const int prec = ADDR_MAX_PRECISION; - - { - tree r[2]; - /* Determine the largest valid range size, including zero. */ - if (!get_size_range (size, r, SR_ALLOW_ZERO | SR_USE_LARGEST)) - return NULL_TREE; - rng1[0] = wi::to_wide (r[0], prec); - rng1[1] = wi::to_wide (r[1], prec); - } +/* Do some very basic size validation of a call to the strcpy builtin + given by EXP. Return NULL_RTX to have the built-in expand to a call + to the library function. */ - if (argidx2 > nargs && TREE_CODE (size) == INTEGER_CST) - return fold_convert (sizetype, size); +static rtx +expand_builtin_strcat (tree exp) +{ + if (!validate_arglist (exp, POINTER_TYPE, POINTER_TYPE, VOID_TYPE) + || !warn_stringop_overflow) + return NULL_RTX; - /* To handle ranges do the math in wide_int and return the product - of the upper bounds as a constant. Ignore anti-ranges. */ - tree n = argidx2 < nargs ? gimple_call_arg (stmt, argidx2) : integer_one_node; - wide_int rng2[2]; - { - tree r[2]; - /* As above, use the full non-negative range on failure. 
 */
- if (!get_size_range (n, r, SR_ALLOW_ZERO | SR_USE_LARGEST))
- return NULL_TREE;
- rng2[0] = wi::to_wide (r[0], prec);
- rng2[1] = wi::to_wide (r[1], prec);
- }

+ tree dest = CALL_EXPR_ARG (exp, 0);
+ tree src = CALL_EXPR_ARG (exp, 1);

- /* Compute products of both bounds for the caller but return the lesser
- of SIZE_MAX and the product of the upper bounds as a constant. */
- rng1[0] = rng1[0] * rng2[0];
- rng1[1] = rng1[1] * rng2[1];
+ /* There is no way here to determine the length of the string in
+ the destination to which the SRC string is being appended so
+ just diagnose cases when the source string is longer than
+ the destination object. */
+ access_data data (exp, access_read_write, NULL_TREE, true,
+ NULL_TREE, true);
+ const int ost = warn_stringop_overflow ? warn_stringop_overflow - 1 : 1;
+ compute_objsize (src, ost, &data.src);
+ tree destsize = compute_objsize (dest, ost, &data.dst);

- const tree size_max = TYPE_MAX_VALUE (sizetype);
- if (wi::gtu_p (rng1[1], wi::to_wide (size_max, prec)))
- {
- rng1[1] = wi::to_wide (size_max, prec);
- return size_max;
- }
+ check_access (exp, /*dstwrite=*/NULL_TREE, /*maxread=*/NULL_TREE,
+ src, destsize, data.mode, &data);

- return wide_int_to_tree (sizetype, rng1[1]);
+ return NULL_RTX;
}

-/* For an access to an object referenced by the function parameter PTR
- of pointer type, set RNG[] to the range of sizes of the object
- obtained from the attribute access specification for the current
- function. Set STATIC_ARRAY if the array parameter has been declared
- [static]. Return the function parameter on success and null
- otherwise. */
-
-tree
-gimple_parm_array_size (tree ptr, wide_int rng[2],
- bool *static_array /* = NULL */)
-{
- /* For a function argument try to determine the byte size of the array
- from the current function declaration (e.g., attribute access or
- related). */
- tree var = SSA_NAME_VAR (ptr);
- if (TREE_CODE (var) != PARM_DECL)
- return NULL_TREE;
+/* Expand expression EXP, which is a call to the strcpy builtin. Return
+ NULL_RTX if we failed; the caller should emit a normal call, otherwise
+ try to get the result in TARGET, if convenient (and in mode MODE if that's
+ convenient). */

- const unsigned prec = TYPE_PRECISION (sizetype);
+static rtx
+expand_builtin_strcpy (tree exp, rtx target)
+{
+ if (!validate_arglist (exp, POINTER_TYPE, POINTER_TYPE, VOID_TYPE))
+ return NULL_RTX;

- rdwr_map rdwr_idx;
- attr_access *access = get_parm_access (rdwr_idx, var);
- if (!access)
- return NULL_TREE;
+ tree dest = CALL_EXPR_ARG (exp, 0);
+ tree src = CALL_EXPR_ARG (exp, 1);

- if (access->sizarg != UINT_MAX)
+ if (warn_stringop_overflow)
 {
- /* TODO: Try to extract the range from the argument based on
- those of subsequent assertions or based on known calls to
- the current function. */
- return NULL_TREE;
+ access_data data (exp, access_read_write, NULL_TREE, true,
+ NULL_TREE, true);
+ const int ost = warn_stringop_overflow ? warn_stringop_overflow - 1 : 1;
+ compute_objsize (src, ost, &data.src);
+ tree dstsize = compute_objsize (dest, ost, &data.dst);
+ check_access (exp, /*dstwrite=*/ NULL_TREE,
+ /*maxread=*/ NULL_TREE, /*srcstr=*/ src,
+ dstsize, data.mode, &data);
 }
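The OST computation above maps the -Wstringop-overflow= level to an object
size type for compute_objsize; a standalone sketch of the mapping
(illustrative only, outside GCC):

  #include <stdio.h>

  int
  main (void)
  {
    /* -Wstringop-overflow=N (N > 0) selects Object Size type N - 1;
       the fallback to type 1 for level 0 is only defensive, since the
       callers bail out earlier when the warning is disabled.  */
    for (int warn_level = 0; warn_level <= 2; ++warn_level)
      {
	int ost = warn_level ? warn_level - 1 : 1;
	printf ("-Wstringop-overflow=%d -> ostype %d\n", warn_level, ost);
      }
    return 0;
  }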
- if (!access->minsize)
- return NULL_TREE;
-
- /* Only consider ordinary array bound at level 2 (or above if it's
- ever added). */
- if (warn_array_parameter < 2 && !access->static_p)
- return NULL_TREE;
-
- if (static_array)
- *static_array = access->static_p;
-
- rng[0] = wi::zero (prec);
- rng[1] = wi::uhwi (access->minsize, prec);
- /* Multiply the array bound encoded in the attribute by the size
- of what the pointer argument to which it decays points to. */
- tree eltype = TREE_TYPE (TREE_TYPE (ptr));
- tree size = TYPE_SIZE_UNIT (eltype);
- if (!size || TREE_CODE (size) != INTEGER_CST)
- return NULL_TREE;
+ if (rtx ret = expand_builtin_strcpy_args (exp, dest, src, target))
+ {
+ /* Check to see if the argument was declared attribute nonstring
+ and if so, issue a warning since at this point it's not known
+ to be nul-terminated. */
+ tree fndecl = get_callee_fndecl (exp);
+ maybe_warn_nonstring_arg (fndecl, exp);
+ return ret;
+ }

- rng[1] *= wi::to_wide (size, prec);
- return var;
+ return NULL_RTX;
}

-/* Wrapper around the wide_int overload of get_range that accepts
- offset_int instead. For middle end expressions it returns the same
- result. For a subset of nonconstant expressions emitted by the front
- end it determines a more precise range than would be possible
- otherwise. */
+/* Helper function to do the actual work for expand_builtin_strcpy. The
+ arguments to the builtin_strcpy call DEST and SRC are broken out
+ so that this can also be called without constructing an actual CALL_EXPR.
+ The other arguments and return value are the same as for
+ expand_builtin_strcpy. */

-static bool
-get_offset_range (tree x, gimple *stmt, offset_int r[2], range_query *rvals)
+static rtx
+expand_builtin_strcpy_args (tree exp, tree dest, tree src, rtx target)
 {
- offset_int add = 0;
- if (TREE_CODE (x) == PLUS_EXPR)
+ /* Detect strcpy calls with unterminated arrays. */
+ tree size;
+ bool exact;
+ if (tree nonstr = unterminated_array (src, &size, &exact))
 {
- /* Handle constant offsets in pointer addition expressions seen
- in the front end IL. */
- tree op = TREE_OPERAND (x, 1);
- if (TREE_CODE (op) == INTEGER_CST)
- {
- op = fold_convert (signed_type_for (TREE_TYPE (op)), op);
- add = wi::to_offset (op);
- x = TREE_OPERAND (x, 0);
- }
+ /* NONSTR refers to the non-nul terminated constant array. */
+ warn_string_no_nul (EXPR_LOCATION (exp), exp, NULL, src, nonstr,
+ size, exact);
+ return NULL_RTX;
 }

- if (TREE_CODE (x) == NOP_EXPR)
- /* Also handle conversions to sizetype seen in the front end IL. */
- x = TREE_OPERAND (x, 0);
+ return expand_movstr (dest, src, target, /*retmode=*/ RETURN_BEGIN);
+}

- tree type = TREE_TYPE (x);
- if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
- return false;
+/* Expand a call EXP to the stpcpy builtin.
+ Return NULL_RTX if we failed; the caller should emit a normal call,
+ otherwise try to get the result in TARGET, if convenient (and in
+ mode MODE if that's convenient).
*/ - if (TREE_CODE (x) != INTEGER_CST - && TREE_CODE (x) != SSA_NAME) - { - if (TYPE_UNSIGNED (type) - && TYPE_PRECISION (type) == TYPE_PRECISION (sizetype)) - type = signed_type_for (type); +static rtx +expand_builtin_stpcpy_1 (tree exp, rtx target, machine_mode mode) +{ + tree dst, src; + location_t loc = EXPR_LOCATION (exp); - r[0] = wi::to_offset (TYPE_MIN_VALUE (type)) + add; - r[1] = wi::to_offset (TYPE_MAX_VALUE (type)) + add; - return x; + if (!validate_arglist (exp, POINTER_TYPE, POINTER_TYPE, VOID_TYPE)) + return NULL_RTX; + + dst = CALL_EXPR_ARG (exp, 0); + src = CALL_EXPR_ARG (exp, 1); + + if (warn_stringop_overflow) + { + access_data data (exp, access_read_write); + tree destsize = compute_objsize (dst, warn_stringop_overflow - 1, + &data.dst); + check_access (exp, /*dstwrite=*/NULL_TREE, /*maxread=*/NULL_TREE, + src, destsize, data.mode, &data); } - wide_int wr[2]; - if (!get_range (x, stmt, wr, rvals)) - return false; + /* If return value is ignored, transform stpcpy into strcpy. */ + if (target == const0_rtx && builtin_decl_implicit (BUILT_IN_STRCPY)) + { + tree fn = builtin_decl_implicit (BUILT_IN_STRCPY); + tree result = build_call_nofold_loc (loc, fn, 2, dst, src); + return expand_expr (result, target, mode, EXPAND_NORMAL); + } + else + { + tree len, lenp1; + rtx ret; - signop sgn = SIGNED; - /* Only convert signed integers or unsigned sizetype to a signed - offset and avoid converting large positive values in narrower - types to negative offsets. */ - if (TYPE_UNSIGNED (type) - && wr[0].get_precision () < TYPE_PRECISION (sizetype)) - sgn = UNSIGNED; + /* Ensure we get an actual string whose length can be evaluated at + compile-time, not an expression containing a string. This is + because the latter will potentially produce pessimized code + when used to produce the return value. */ + c_strlen_data lendata = { }; + if (!c_getstr (src) + || !(len = c_strlen (src, 0, &lendata, 1))) + return expand_movstr (dst, src, target, + /*retmode=*/ RETURN_END_MINUS_ONE); - r[0] = offset_int::from (wr[0], sgn); - r[1] = offset_int::from (wr[1], sgn); - return true; -} + if (lendata.decl) + warn_string_no_nul (EXPR_LOCATION (exp), exp, NULL, src, lendata.decl); -/* Return the argument that the call STMT to a built-in function returns - or null if it doesn't. On success, set OFFRNG[] to the range of offsets - from the argument reflected in the value returned by the built-in if it - can be determined, otherwise to 0 and HWI_M1U respectively. Set - *PAST_END for functions like mempcpy that might return a past the end - pointer (most functions return a dereferenceable pointer to an existing - element of an array). */ + lenp1 = size_binop_loc (loc, PLUS_EXPR, len, ssize_int (1)); + ret = expand_builtin_mempcpy_args (dst, src, lenp1, + target, exp, + /*retmode=*/ RETURN_END_MINUS_ONE); -static tree -gimple_call_return_array (gimple *stmt, offset_int offrng[2], bool *past_end, - range_query *rvals) -{ - /* Clear and set below for the rare function(s) that might return - a past-the-end pointer. */ - *past_end = false; + if (ret) + return ret; - { - /* Check for attribute fn spec to see if the function returns one - of its arguments. */ - attr_fnspec fnspec = gimple_call_fnspec (as_a (stmt)); - unsigned int argno; - if (fnspec.returns_arg (&argno)) - { - /* Functions return the first argument (not a range). 
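- (This is the attribute-fn-spec case; for the library functions
- handled here, such as strcpy, the returned argument is the first.)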
*/
-	offrng[0] = offrng[1] = 0;
-	return gimple_call_arg (stmt, argno);
-      }
-  }
+      if (TREE_CODE (len) == INTEGER_CST)
+	{
+	  rtx len_rtx = expand_normal (len);
 
-  if (gimple_call_num_args (stmt) < 1)
-    return NULL_TREE;
+	  if (CONST_INT_P (len_rtx))
+	    {
+	      ret = expand_builtin_strcpy_args (exp, dst, src, target);
 
-  tree fn = gimple_call_fndecl (stmt);
-  if (!gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
-    {
-      /* See if this is a call to placement new.  */
-      if (!fn
-	  || !DECL_IS_OPERATOR_NEW_P (fn)
-	  || DECL_IS_REPLACEABLE_OPERATOR_NEW_P (fn))
-	return NULL_TREE;
+	      if (ret)
+		{
+		  if (! target)
+		    {
+		      if (mode != VOIDmode)
+			target = gen_reg_rtx (mode);
+		      else
+			target = gen_reg_rtx (GET_MODE (ret));
+		    }
+		  if (GET_MODE (target) != GET_MODE (ret))
+		    ret = gen_lowpart (GET_MODE (target), ret);
 
-      /* Check the mangling, keeping in mind that operator new takes
-	 a size_t which could be unsigned int or unsigned long.  */
-      tree fname = DECL_ASSEMBLER_NAME (fn);
-      if (!id_equal (fname, "_ZnwjPv")          // ordinary form
-	  && !id_equal (fname, "_ZnwmPv")       // ordinary form
-	  && !id_equal (fname, "_ZnajPv")       // array form
-	  && !id_equal (fname, "_ZnamPv"))      // array form
-	return NULL_TREE;
+		  ret = plus_constant (GET_MODE (ret), ret, INTVAL (len_rtx));
+		  ret = emit_move_insn (target, force_operand (ret, NULL_RTX));
+		  gcc_assert (ret);
 
-      if (gimple_call_num_args (stmt) != 2)
-	return NULL_TREE;
+		  return target;
+		}
+	    }
+	}
 
-      /* Allocation functions return a pointer to the beginning.  */
-      offrng[0] = offrng[1] = 0;
-      return gimple_call_arg (stmt, 1);
+      return expand_movstr (dst, src, target,
+			    /*retmode=*/ RETURN_END_MINUS_ONE);
     }
+}
 
-  switch (DECL_FUNCTION_CODE (fn))
-    {
-    case BUILT_IN_MEMCPY:
-    case BUILT_IN_MEMCPY_CHK:
-    case BUILT_IN_MEMMOVE:
-    case BUILT_IN_MEMMOVE_CHK:
-    case BUILT_IN_MEMSET:
-    case BUILT_IN_STRCAT:
-    case BUILT_IN_STRCAT_CHK:
-    case BUILT_IN_STRCPY:
-    case BUILT_IN_STRCPY_CHK:
-    case BUILT_IN_STRNCAT:
-    case BUILT_IN_STRNCAT_CHK:
-    case BUILT_IN_STRNCPY:
-    case BUILT_IN_STRNCPY_CHK:
-      /* Functions return the first argument (not a range).  */
-      offrng[0] = offrng[1] = 0;
-      return gimple_call_arg (stmt, 0);
+/* Expand a call EXP to the stpcpy builtin and diagnose uses of nonstring
+   arguments while being careful to avoid duplicate warnings (which could
+   be issued if the expander were to expand the call, resulting in it
+   being emitted in expand_call()).  */
 
-    case BUILT_IN_MEMPCPY:
-    case BUILT_IN_MEMPCPY_CHK:
-      {
-	/* The returned pointer is in a range constrained by the smaller
-	   of the upper bound of the size argument and the source object
-	   size.  */
-	offrng[0] = 0;
-	offrng[1] = HOST_WIDE_INT_M1U;
-	tree off = gimple_call_arg (stmt, 2);
-	bool off_valid = get_offset_range (off, stmt, offrng, rvals);
-	if (!off_valid || offrng[0] != offrng[1])
-	  {
-	    /* If the offset is either indeterminate or in some range,
-	       try to constrain its upper bound to at most the size
-	       of the source object.  */
-	    access_ref aref;
-	    tree src = gimple_call_arg (stmt, 1);
-	    if (compute_objsize (src, 1, &aref, rvals)
-		&& aref.sizrng[1] < offrng[1])
-	      offrng[1] = aref.sizrng[1];
-	  }
 
+static rtx
+expand_builtin_stpcpy (tree exp, rtx target, machine_mode mode)
+{
+  if (rtx ret = expand_builtin_stpcpy_1 (exp, target, mode))
+    {
+      /* The call has been successfully expanded.  Check for nonstring
+	 arguments and issue warnings as appropriate.  */
+      maybe_warn_nonstring_arg (get_callee_fndecl (exp), exp);
+      return ret;
+    }
 
-	/* Mempcpy may return a past-the-end pointer. 
*/ - *past_end = true; - return gimple_call_arg (stmt, 0); - } + return NULL_RTX; +} - case BUILT_IN_MEMCHR: - { - tree off = gimple_call_arg (stmt, 2); - if (get_offset_range (off, stmt, offrng, rvals)) - offrng[1] -= 1; - else - offrng[1] = HOST_WIDE_INT_M1U; +/* Check a call EXP to the stpncpy built-in for validity. + Return NULL_RTX on both success and failure. */ - offrng[0] = 0; - return gimple_call_arg (stmt, 0); - } +static rtx +expand_builtin_stpncpy (tree exp, rtx) +{ + if (!validate_arglist (exp, + POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE) + || !warn_stringop_overflow) + return NULL_RTX; - case BUILT_IN_STRCHR: - case BUILT_IN_STRRCHR: - case BUILT_IN_STRSTR: - offrng[0] = 0; - offrng[1] = HOST_WIDE_INT_M1U; - return gimple_call_arg (stmt, 0); + /* The source and destination of the call. */ + tree dest = CALL_EXPR_ARG (exp, 0); + tree src = CALL_EXPR_ARG (exp, 1); - case BUILT_IN_STPCPY: - case BUILT_IN_STPCPY_CHK: - { - access_ref aref; - tree src = gimple_call_arg (stmt, 1); - if (compute_objsize (src, 1, &aref, rvals)) - offrng[1] = aref.sizrng[1] - 1; - else - offrng[1] = HOST_WIDE_INT_M1U; - - offrng[0] = 0; - return gimple_call_arg (stmt, 0); - } + /* The exact number of bytes to write (not the maximum). */ + tree len = CALL_EXPR_ARG (exp, 2); + access_data data (exp, access_read_write); + /* The size of the destination object. */ + tree destsize = compute_objsize (dest, warn_stringop_overflow - 1, &data.dst); + check_access (exp, len, /*maxread=*/len, src, destsize, data.mode, &data); + return NULL_RTX; +} - case BUILT_IN_STPNCPY: - case BUILT_IN_STPNCPY_CHK: - { - /* The returned pointer is in a range between the first argument - and it plus the smaller of the upper bound of the size argument - and the source object size. */ - offrng[1] = HOST_WIDE_INT_M1U; - tree off = gimple_call_arg (stmt, 2); - if (!get_offset_range (off, stmt, offrng, rvals) - || offrng[0] != offrng[1]) - { - /* If the offset is either indeterminate or in some range, - try to constrain its upper bound to at most the size - of the source object. */ - access_ref aref; - tree src = gimple_call_arg (stmt, 1); - if (compute_objsize (src, 1, &aref, rvals) - && aref.sizrng[1] < offrng[1]) - offrng[1] = aref.sizrng[1]; - } +/* Callback routine for store_by_pieces. Read GET_MODE_BITSIZE (MODE) + bytes from constant string DATA + OFFSET and return it as target + constant. */ - /* When the source is the empty string the returned pointer is - a copy of the argument. Otherwise stpcpy can also return - a past-the-end pointer. */ - offrng[0] = 0; - *past_end = true; - return gimple_call_arg (stmt, 0); - } +rtx +builtin_strncpy_read_str (void *data, void *, HOST_WIDE_INT offset, + scalar_int_mode mode) +{ + const char *str = (const char *) data; - default: - break; - } + if ((unsigned HOST_WIDE_INT) offset > strlen (str)) + return const0_rtx; - return NULL_TREE; + return c_readstr (str + offset, mode); } -/* A helper of compute_objsize_r() to determine the size from an assignment - statement STMT with the RHS of either MIN_EXPR or MAX_EXPR. */ +/* Helper to check the sizes of sequences and the destination of calls + to __builtin_strncat and __builtin___strncat_chk. Returns true on + success (no overflow or invalid sizes), false otherwise. 
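+
+   A sketch of the diagnosed pattern (hypothetical user code):
+
+     char d[8];
+     extern const char *s;
+     strncat (d, s, sizeof d);
+
+   Here the bound equals the destination size, leaving no room for
+   the terminating nul that strncat always appends, so the call is
+   diagnosed below.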
*/ static bool -handle_min_max_size (gimple *stmt, int ostype, access_ref *pref, - ssa_name_limit_t &snlim, pointer_query *qry) +check_strncat_sizes (tree exp, tree objsize) { - tree_code code = gimple_assign_rhs_code (stmt); + tree dest = CALL_EXPR_ARG (exp, 0); + tree src = CALL_EXPR_ARG (exp, 1); + tree maxread = CALL_EXPR_ARG (exp, 2); - tree ptr = gimple_assign_rhs1 (stmt); + /* Try to determine the range of lengths that the source expression + refers to. */ + c_strlen_data lendata = { }; + get_range_strlen (src, &lendata, /* eltsize = */ 1); - /* In a valid MAX_/MIN_EXPR both operands must refer to the same array. - Determine the size/offset of each and use the one with more or less - space remaining, respectively. If either fails, use the information - determined from the other instead, adjusted up or down as appropriate - for the expression. */ - access_ref aref[2] = { *pref, *pref }; - if (!compute_objsize_r (ptr, ostype, &aref[0], snlim, qry)) - { - aref[0].base0 = false; - aref[0].offrng[0] = aref[0].offrng[1] = 0; - aref[0].add_max_offset (); - aref[0].set_max_size_range (); - } + /* Try to verify that the destination is big enough for the shortest + string. */ - ptr = gimple_assign_rhs2 (stmt); - if (!compute_objsize_r (ptr, ostype, &aref[1], snlim, qry)) + access_data data (exp, access_read_write, maxread, true); + if (!objsize && warn_stringop_overflow) { - aref[1].base0 = false; - aref[1].offrng[0] = aref[1].offrng[1] = 0; - aref[1].add_max_offset (); - aref[1].set_max_size_range (); + /* If it hasn't been provided by __strncat_chk, try to determine + the size of the destination object into which the source is + being copied. */ + objsize = compute_objsize (dest, warn_stringop_overflow - 1, &data.dst); } - if (!aref[0].ref && !aref[1].ref) - /* Fail if the identity of neither argument could be determined. */ - return false; + /* Add one for the terminating nul. */ + tree srclen = (lendata.minlen + ? fold_build2 (PLUS_EXPR, size_type_node, lendata.minlen, + size_one_node) + : NULL_TREE); - bool i0 = false; - if (aref[0].ref && aref[0].base0) + /* The strncat function copies at most MAXREAD bytes and always appends + the terminating nul so the specified upper bound should never be equal + to (or greater than) the size of the destination. */ + if (tree_fits_uhwi_p (maxread) && tree_fits_uhwi_p (objsize) + && tree_int_cst_equal (objsize, maxread)) { - if (aref[1].ref && aref[1].base0) - { - /* If the object referenced by both arguments has been determined - set *PREF to the one with more or less space remainng, whichever - is appopriate for CODE. - TODO: Indicate when the objects are distinct so it can be - diagnosed. */ - i0 = code == MAX_EXPR; - const bool i1 = !i0; - - if (aref[i0].size_remaining () < aref[i1].size_remaining ()) - *pref = aref[i1]; - else - *pref = aref[i0]; - return true; - } + location_t loc = EXPR_LOCATION (exp); + warning_at (loc, OPT_Wstringop_overflow_, + "%qD specified bound %E equals destination size", + get_callee_fndecl (exp), maxread); - /* If only the object referenced by one of the arguments could be - determined, use it and... */ - *pref = aref[0]; - i0 = true; + return false; } - else - *pref = aref[1]; - const bool i1 = !i0; - /* ...see if the offset obtained from the other pointer can be used - to tighten up the bound on the offset obtained from the first. 
*/
-  if ((code == MAX_EXPR && aref[i1].offrng[1] < aref[i0].offrng[0])
-      || (code == MIN_EXPR && aref[i0].offrng[0] < aref[i1].offrng[1]))
-    {
-      pref->offrng[0] = aref[i0].offrng[0];
-      pref->offrng[1] = aref[i0].offrng[1];
-    }
-  return true;
+  if (!srclen
+      || (maxread && tree_fits_uhwi_p (maxread)
+	  && tree_fits_uhwi_p (srclen)
+	  && tree_int_cst_lt (maxread, srclen)))
+    srclen = maxread;
+
+  /* The number of bytes to write is LEN but check_access will also
+     check SRCLEN if LEN's value isn't known.  */
+  return check_access (exp, /*dstwrite=*/NULL_TREE, maxread, srclen,
+		       objsize, data.mode, &data);
 }
 
-/* A helper of compute_objsize_r() to determine the size from ARRAY_REF
-   AREF.  ADDR is true if PTR is the operand of ADDR_EXPR.  Return true
-   on success and false on failure.  */
+/* Similar to expand_builtin_strcat, do some very basic size validation
+   of a call to the strncat builtin given by EXP.  Return NULL_RTX to have
+   the built-in expand to a call to the library function.  */
 
-static bool
-handle_array_ref (tree aref, bool addr, int ostype, access_ref *pref,
-		  ssa_name_limit_t &snlim, pointer_query *qry)
+static rtx
+expand_builtin_strncat (tree exp, rtx)
 {
-  gcc_assert (TREE_CODE (aref) == ARRAY_REF);
+  if (!validate_arglist (exp,
+			 POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)
+      || !warn_stringop_overflow)
+    return NULL_RTX;
 
-  ++pref->deref;
+  tree dest = CALL_EXPR_ARG (exp, 0);
+  tree src = CALL_EXPR_ARG (exp, 1);
+  /* The upper bound on the number of bytes to write.  */
+  tree maxread = CALL_EXPR_ARG (exp, 2);
 
-  tree arefop = TREE_OPERAND (aref, 0);
-  tree reftype = TREE_TYPE (arefop);
-  if (!addr && TREE_CODE (TREE_TYPE (reftype)) == POINTER_TYPE)
-    /* Avoid arrays of pointers.  FIXME: Hande pointers to arrays
-       of known bound.  */
-    return false;
+  /* Detect unterminated source (only).  */
+  if (!check_nul_terminated_array (exp, src, maxread))
+    return NULL_RTX;
 
-  if (!compute_objsize_r (arefop, ostype, pref, snlim, qry))
-    return false;
+  /* The length of the source sequence.  */
+  tree slen = c_strlen (src, 1);
 
-  offset_int orng[2];
-  tree off = pref->eval (TREE_OPERAND (aref, 1));
-  range_query *const rvals = qry ? qry->rvals : NULL;
-  if (!get_offset_range (off, NULL, orng, rvals))
+  /* Try to determine the range of lengths that the source expression
+     refers to.  Since the lengths are only used for warning and not
+     for code generation, disable strict mode below.  */
+  tree maxlen = slen;
+  if (!maxlen)
     {
-      /* Set ORNG to the maximum offset representable in ptrdiff_t.  */
-      orng[1] = wi::to_offset (TYPE_MAX_VALUE (ptrdiff_type_node));
-      orng[0] = -orng[1] - 1;
+      c_strlen_data lendata = { };
+      get_range_strlen (src, &lendata, /* eltsize = */ 1);
+      maxlen = lendata.maxbound;
    }
 
-  /* Convert the array index range determined above to a byte
-     offset.  */
-  tree lowbnd = array_ref_low_bound (aref);
-  if (!integer_zerop (lowbnd) && tree_fits_uhwi_p (lowbnd))
-    {
-      /* Adjust the index by the low bound of the array domain
-	 (normally zero but 1 in Fortran).  */
-      unsigned HOST_WIDE_INT lb = tree_to_uhwi (lowbnd);
-      orng[0] -= lb;
-      orng[1] -= lb;
-    }
+  access_data data (exp, access_read_write);
+  /* Try to verify that the destination is big enough for the shortest
+     string.  First try to determine the size of the destination object
+     into which the source is being copied.  */
+  tree destsize = compute_objsize (dest, warn_stringop_overflow - 1, &data.dst);
+
+  /* Add one for the terminating nul.  */
+  tree srclen = (maxlen
+		 ? 
fold_build2 (PLUS_EXPR, size_type_node, maxlen, + size_one_node) + : NULL_TREE); - tree eltype = TREE_TYPE (aref); - tree tpsize = TYPE_SIZE_UNIT (eltype); - if (!tpsize || TREE_CODE (tpsize) != INTEGER_CST) + /* The strncat function copies at most MAXREAD bytes and always appends + the terminating nul so the specified upper bound should never be equal + to (or greater than) the size of the destination. */ + if (tree_fits_uhwi_p (maxread) && tree_fits_uhwi_p (destsize) + && tree_int_cst_equal (destsize, maxread)) { - pref->add_max_offset (); - return true; + location_t loc = EXPR_LOCATION (exp); + warning_at (loc, OPT_Wstringop_overflow_, + "%qD specified bound %E equals destination size", + get_callee_fndecl (exp), maxread); + + return NULL_RTX; } - offset_int sz = wi::to_offset (tpsize); - orng[0] *= sz; - orng[1] *= sz; + if (!srclen + || (maxread && tree_fits_uhwi_p (maxread) + && tree_fits_uhwi_p (srclen) + && tree_int_cst_lt (maxread, srclen))) + srclen = maxread; - if (ostype && TREE_CODE (eltype) == ARRAY_TYPE) - { - /* Except for the permissive raw memory functions which use - the size of the whole object determined above, use the size - of the referenced array. Because the overall offset is from - the beginning of the complete array object add this overall - offset to the size of array. */ - offset_int sizrng[2] = - { - pref->offrng[0] + orng[0] + sz, - pref->offrng[1] + orng[1] + sz - }; - if (sizrng[1] < sizrng[0]) - std::swap (sizrng[0], sizrng[1]); - if (sizrng[0] >= 0 && sizrng[0] <= pref->sizrng[0]) - pref->sizrng[0] = sizrng[0]; - if (sizrng[1] >= 0 && sizrng[1] <= pref->sizrng[1]) - pref->sizrng[1] = sizrng[1]; - } - - pref->add_offset (orng[0], orng[1]); - return true; + check_access (exp, /*dstwrite=*/NULL_TREE, maxread, srclen, + destsize, data.mode, &data); + return NULL_RTX; } -/* A helper of compute_objsize_r() to determine the size from MEM_REF - MREF. Return true on success and false on failure. */ +/* Expand expression EXP, which is a call to the strncpy builtin. Return + NULL_RTX if we failed the caller should emit a normal call. */ -static bool -handle_mem_ref (tree mref, int ostype, access_ref *pref, - ssa_name_limit_t &snlim, pointer_query *qry) +static rtx +expand_builtin_strncpy (tree exp, rtx target) { - gcc_assert (TREE_CODE (mref) == MEM_REF); + location_t loc = EXPR_LOCATION (exp); + + if (!validate_arglist (exp, + POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) + return NULL_RTX; + tree dest = CALL_EXPR_ARG (exp, 0); + tree src = CALL_EXPR_ARG (exp, 1); + /* The number of bytes to write (not the maximum). */ + tree len = CALL_EXPR_ARG (exp, 2); - ++pref->deref; + /* The length of the source sequence. */ + tree slen = c_strlen (src, 1); - if (VECTOR_TYPE_P (TREE_TYPE (mref))) + if (warn_stringop_overflow) { - /* Hack: Handle MEM_REFs of vector types as those to complete - objects; those may be synthesized from multiple assignments - to consecutive data members (see PR 93200 and 96963). - FIXME: Vectorized assignments should only be present after - vectorization so this hack is only necessary after it has - run and could be avoided in calls from prior passes (e.g., - tree-ssa-strlen.c). - FIXME: Deal with this more generally, e.g., by marking up - such MEM_REFs at the time they're created. */ - ostype = 0; + access_data data (exp, access_read_write, len, true, len, true); + const int ost = warn_stringop_overflow ? 
warn_stringop_overflow - 1 : 1; + compute_objsize (src, ost, &data.src); + tree dstsize = compute_objsize (dest, ost, &data.dst); + /* The number of bytes to write is LEN but check_access will also + check SLEN if LEN's value isn't known. */ + check_access (exp, /*dstwrite=*/len, + /*maxread=*/len, src, dstsize, data.mode, &data); } - tree mrefop = TREE_OPERAND (mref, 0); - if (!compute_objsize_r (mrefop, ostype, pref, snlim, qry)) - return false; + /* We must be passed a constant len and src parameter. */ + if (!tree_fits_uhwi_p (len) || !slen || !tree_fits_uhwi_p (slen)) + return NULL_RTX; - offset_int orng[2]; - tree off = pref->eval (TREE_OPERAND (mref, 1)); - range_query *const rvals = qry ? qry->rvals : NULL; - if (!get_offset_range (off, NULL, orng, rvals)) + slen = size_binop_loc (loc, PLUS_EXPR, slen, ssize_int (1)); + + /* We're required to pad with trailing zeros if the requested + len is greater than strlen(s2)+1. In that case try to + use store_by_pieces, if it fails, punt. */ + if (tree_int_cst_lt (slen, len)) { - /* Set ORNG to the maximum offset representable in ptrdiff_t. */ - orng[1] = wi::to_offset (TYPE_MAX_VALUE (ptrdiff_type_node)); - orng[0] = -orng[1] - 1; + unsigned int dest_align = get_pointer_alignment (dest); + const char *p = c_getstr (src); + rtx dest_mem; + + if (!p || dest_align == 0 || !tree_fits_uhwi_p (len) + || !can_store_by_pieces (tree_to_uhwi (len), + builtin_strncpy_read_str, + CONST_CAST (char *, p), + dest_align, false)) + return NULL_RTX; + + dest_mem = get_memory_rtx (dest, len); + store_by_pieces (dest_mem, tree_to_uhwi (len), + builtin_strncpy_read_str, + CONST_CAST (char *, p), dest_align, false, + RETURN_BEGIN); + dest_mem = force_operand (XEXP (dest_mem, 0), target); + dest_mem = convert_memory_address (ptr_mode, dest_mem); + return dest_mem; } - pref->add_offset (orng[0], orng[1]); - return true; + return NULL_RTX; } -/* Helper to compute the size of the object referenced by the PTR - expression which must have pointer type, using Object Size type - OSTYPE (only the least significant 2 bits are used). - On success, sets PREF->REF to the DECL of the referenced object - if it's unique, otherwise to null, PREF->OFFRNG to the range of - offsets into it, and PREF->SIZRNG to the range of sizes of - the object(s). - SNLIM is used to avoid visiting the same PHI operand multiple - times, and, when nonnull, RVALS to determine range information. - Returns true on success, false when a meaningful size (or range) - cannot be determined. - - The function is intended for diagnostics and should not be used - to influence code generation or optimization. */ +/* Callback routine for store_by_pieces. Read GET_MODE_BITSIZE (MODE) + bytes from constant string DATA + OFFSET and return it as target + constant. If PREV isn't nullptr, it has the RTL info from the + previous iteration. */ -static bool -compute_objsize_r (tree ptr, int ostype, access_ref *pref, - ssa_name_limit_t &snlim, pointer_query *qry) +rtx +builtin_memset_read_str (void *data, void *prevp, + HOST_WIDE_INT offset ATTRIBUTE_UNUSED, + scalar_int_mode mode) { - STRIP_NOPS (ptr); - - const bool addr = TREE_CODE (ptr) == ADDR_EXPR; - if (addr) + by_pieces_prev *prev = (by_pieces_prev *) prevp; + if (prev != nullptr && prev->data != nullptr) { - --pref->deref; - ptr = TREE_OPERAND (ptr, 0); + /* Use the previous data in the same mode. 
*/ + if (prev->mode == mode) + return prev->data; } - if (DECL_P (ptr)) - { - pref->ref = ptr; + const char *c = (const char *) data; + char *p = XALLOCAVEC (char, GET_MODE_SIZE (mode)); - if (!addr && POINTER_TYPE_P (TREE_TYPE (ptr))) - { - /* Set the maximum size if the reference is to the pointer - itself (as opposed to what it points to), and clear - BASE0 since the offset isn't necessarily zero-based. */ - pref->set_max_size_range (); - pref->base0 = false; - return true; - } + memset (p, *c, GET_MODE_SIZE (mode)); - if (tree size = decl_init_size (ptr, false)) - if (TREE_CODE (size) == INTEGER_CST) - { - pref->sizrng[0] = pref->sizrng[1] = wi::to_offset (size); - return true; - } + return c_readstr (p, mode); +} - pref->set_max_size_range (); - return true; - } +/* Callback routine for store_by_pieces. Return the RTL of a register + containing GET_MODE_SIZE (MODE) consecutive copies of the unsigned + char value given in the RTL register data. For example, if mode is + 4 bytes wide, return the RTL for 0x01010101*data. If PREV isn't + nullptr, it has the RTL info from the previous iteration. */ - const tree_code code = TREE_CODE (ptr); - range_query *const rvals = qry ? qry->rvals : NULL; +static rtx +builtin_memset_gen_str (void *data, void *prevp, + HOST_WIDE_INT offset ATTRIBUTE_UNUSED, + scalar_int_mode mode) +{ + rtx target, coeff; + size_t size; + char *p; - if (code == BIT_FIELD_REF) + by_pieces_prev *prev = (by_pieces_prev *) prevp; + if (prev != nullptr && prev->data != nullptr) { - tree ref = TREE_OPERAND (ptr, 0); - if (!compute_objsize_r (ref, ostype, pref, snlim, qry)) - return false; + /* Use the previous data in the same mode. */ + if (prev->mode == mode) + return prev->data; - offset_int off = wi::to_offset (pref->eval (TREE_OPERAND (ptr, 2))); - pref->add_offset (off / BITS_PER_UNIT); - return true; + target = simplify_gen_subreg (mode, prev->data, prev->mode, 0); + if (target != nullptr) + return target; } - if (code == COMPONENT_REF) - { - tree ref = TREE_OPERAND (ptr, 0); - if (TREE_CODE (TREE_TYPE (ref)) == UNION_TYPE) - /* In accesses through union types consider the entire unions - rather than just their members. */ - ostype = 0; - tree field = TREE_OPERAND (ptr, 1); + size = GET_MODE_SIZE (mode); + if (size == 1) + return (rtx) data; - if (ostype == 0) - { - /* In OSTYPE zero (for raw memory functions like memcpy), use - the maximum size instead if the identity of the enclosing - object cannot be determined. */ - if (!compute_objsize_r (ref, ostype, pref, snlim, qry)) - return false; - - /* Otherwise, use the size of the enclosing object and add - the offset of the member to the offset computed so far. */ - tree offset = byte_position (field); - if (TREE_CODE (offset) == INTEGER_CST) - pref->add_offset (wi::to_offset (offset)); - else - pref->add_max_offset (); - - if (!pref->ref) - /* REF may have been already set to an SSA_NAME earlier - to provide better context for diagnostics. In that case, - leave it unchanged. */ - pref->ref = ref; - return true; - } + p = XALLOCAVEC (char, size); + memset (p, 1, size); + coeff = c_readstr (p, mode); - pref->ref = field; + target = convert_to_mode (mode, (rtx) data, 1); + target = expand_mult (mode, target, coeff, NULL_RTX, 1); + return force_reg (mode, target); +} - if (!addr && POINTER_TYPE_P (TREE_TYPE (field))) - { - /* Set maximum size if the reference is to the pointer member - itself (as opposed to what it points to). 
*/
-	  pref->set_max_size_range ();
-	  return true;
-	}
 
-      /* SAM is set for array members that might need special treatment.  */
-      special_array_member sam;
-      tree size = component_ref_size (ptr, &sam);
-      if (sam == special_array_member::int_0)
-	pref->sizrng[0] = pref->sizrng[1] = 0;
-      else if (!pref->trail1special && sam == special_array_member::trail_1)
-	pref->sizrng[0] = pref->sizrng[1] = 1;
-      else if (size && TREE_CODE (size) == INTEGER_CST)
-	pref->sizrng[0] = pref->sizrng[1] = wi::to_offset (size);
-      else
-	{
-	  /* When the size of the member is unknown it's either a flexible
-	     array member or a trailing special array member (either zero
-	     length or one-element).  Set the size to the maximum minus
-	     the constant size of the type.  */
-	  pref->sizrng[0] = 0;
-	  pref->sizrng[1] = wi::to_offset (TYPE_MAX_VALUE (ptrdiff_type_node));
-	  if (tree recsize = TYPE_SIZE_UNIT (TREE_TYPE (ref)))
-	    if (TREE_CODE (recsize) == INTEGER_CST)
-	      pref->sizrng[1] -= wi::to_offset (recsize);
-	}
-      return true;
-    }
 
+/* Expand expression EXP, which is a call to the memset builtin.  Return
+   NULL_RTX if we failed and the caller should emit a normal call, otherwise
+   try to get the result in TARGET, if convenient (and in mode MODE if that's
+   convenient).  */
 
-  if (code == ARRAY_REF)
-    return handle_array_ref (ptr, addr, ostype, pref, snlim, qry);
 
+static rtx
+expand_builtin_memset (tree exp, rtx target, machine_mode mode)
+{
+  if (!validate_arglist (exp,
+			 POINTER_TYPE, INTEGER_TYPE, INTEGER_TYPE, VOID_TYPE))
+    return NULL_RTX;
 
-  if (code == MEM_REF)
-    return handle_mem_ref (ptr, ostype, pref, snlim, qry);
+  tree dest = CALL_EXPR_ARG (exp, 0);
+  tree val = CALL_EXPR_ARG (exp, 1);
+  tree len = CALL_EXPR_ARG (exp, 2);
 
-  if (code == TARGET_MEM_REF)
-    {
-      tree ref = TREE_OPERAND (ptr, 0);
-      if (!compute_objsize_r (ref, ostype, pref, snlim, qry))
-	return false;
+  check_memop_access (exp, dest, NULL_TREE, len);
 
-      /* TODO: Handle remaining operands.  Until then, add maximum offset.  */
-      pref->ref = ptr;
-      pref->add_max_offset ();
-      return true;
-    }
+  return expand_builtin_memset_args (dest, val, len, target, mode, exp);
+}
 
+/* Try to store VAL (or, if NULL_RTX, VALC) in LEN bytes starting at TO.
+   Return TRUE if successful, FALSE otherwise.  TO is assumed to be
+   aligned at an ALIGN-bits boundary.  LEN must be a multiple of
+   1<<CTZ_LEN between MIN_LEN and MAX_LEN.
 
-  if (code == INTEGER_CST)
-    {
-      /* Pointer constants other than null are most likely the result
-	 of erroneous null pointer addition/subtraction.  Set size to
-	 zero.  For null pointers, set size to the maximum for now
-	 since those may be the result of jump threading.  */
-      if (integer_zerop (ptr))
-	pref->set_max_size_range ();
-  else
-    pref->sizrng[0] = pref->sizrng[1] = 0;
-  pref->ref = ptr;
 
+   The strategy is to issue one store_by_pieces for each power of two,
+   from most to least significant, guarded by a test on whether there
+   are at least that many bytes left to copy in LEN.
 
-  return true;
-    }
 
+   ??? Should we skip some powers of two in favor of loops?  Maybe start
+   at the max of TO/LEN/word alignment, at least when optimizing for
+   size, instead of ensuring O(log len) dynamic compares?  */
 
-  if (code == STRING_CST)
-    {
-      pref->sizrng[0] = pref->sizrng[1] = TREE_STRING_LENGTH (ptr);
-      pref->ref = ptr;
-      return true;
-    }
 
+bool
+try_store_by_multiple_pieces (rtx to, rtx len, unsigned int ctz_len,
+			      unsigned HOST_WIDE_INT min_len,
+			      unsigned HOST_WIDE_INT max_len,
+			      rtx val, char valc, unsigned int align)
+{
+  int max_bits = floor_log2 (max_len);
+  int min_bits = floor_log2 (min_len);
+  int sctz_len = ctz_len;
 
-  if (code == POINTER_PLUS_EXPR)
-    {
-      tree ref = TREE_OPERAND (ptr, 0);
-      if (!compute_objsize_r (ref, ostype, pref, snlim, qry))
-	return false;
 
+  gcc_checking_assert (sctz_len >= 0);
 
-      /* Clear DEREF since the offset is being applied to the target
-	 of the dereference. 
*/ - pref->deref = 0; + if (val) + valc = 1; - offset_int orng[2]; - tree off = pref->eval (TREE_OPERAND (ptr, 1)); - if (get_offset_range (off, NULL, orng, rvals)) - pref->add_offset (orng[0], orng[1]); - else - pref->add_max_offset (); - return true; - } + /* Bits more significant than TST_BITS are part of the shared prefix + in the binary representation of both min_len and max_len. Since + they're identical, we don't need to test them in the loop. */ + int tst_bits = (max_bits != min_bits ? max_bits + : floor_log2 (max_len ^ min_len)); - if (code == VIEW_CONVERT_EXPR) + /* Check whether it's profitable to start by storing a fixed BLKSIZE + bytes, to lower max_bits. In the unlikely case of a constant LEN + (implied by identical MAX_LEN and MIN_LEN), we want to issue a + single store_by_pieces, but otherwise, select the minimum multiple + of the ALIGN (in bytes) and of the MCD of the possible LENs, that + brings MAX_LEN below TST_BITS, if that's lower than min_len. */ + unsigned HOST_WIDE_INT blksize; + if (max_len > min_len) { - ptr = TREE_OPERAND (ptr, 0); - return compute_objsize_r (ptr, ostype, pref, snlim, qry); + unsigned HOST_WIDE_INT alrng = MAX (HOST_WIDE_INT_1U << ctz_len, + align / BITS_PER_UNIT); + blksize = max_len - (HOST_WIDE_INT_1U << tst_bits) + alrng; + blksize &= ~(alrng - 1); } - - if (code == SSA_NAME) + else if (max_len == min_len) + blksize = max_len; + else + gcc_unreachable (); + if (min_len >= blksize) { - if (!snlim.next ()) - return false; + min_len -= blksize; + min_bits = floor_log2 (min_len); + max_len -= blksize; + max_bits = floor_log2 (max_len); - /* Only process an SSA_NAME if the recursion limit has not yet - been reached. */ - if (qry) - { - if (++qry->depth) - qry->max_depth = qry->depth; - if (const access_ref *cache_ref = qry->get_ref (ptr)) - { - /* If the pointer is in the cache set *PREF to what it refers - to and return success. */ - *pref = *cache_ref; - return true; - } - } + tst_bits = (max_bits != min_bits ? max_bits + : floor_log2 (max_len ^ min_len)); + } + else + blksize = 0; - gimple *stmt = SSA_NAME_DEF_STMT (ptr); - if (is_gimple_call (stmt)) - { - /* If STMT is a call to an allocation function get the size - from its argument(s). If successful, also set *PREF->REF - to PTR for the caller to include in diagnostics. */ - wide_int wr[2]; - if (gimple_call_alloc_size (stmt, wr, rvals)) - { - pref->ref = ptr; - pref->sizrng[0] = offset_int::from (wr[0], UNSIGNED); - pref->sizrng[1] = offset_int::from (wr[1], UNSIGNED); - /* Constrain both bounds to a valid size. */ - offset_int maxsize = wi::to_offset (max_object_size ()); - if (pref->sizrng[0] > maxsize) - pref->sizrng[0] = maxsize; - if (pref->sizrng[1] > maxsize) - pref->sizrng[1] = maxsize; - } - else - { - /* For functions known to return one of their pointer arguments - try to determine what the returned pointer points to, and on - success add OFFRNG which was set to the offset added by - the function (e.g., memchr or stpcpy) to the overall offset. - */ - bool past_end; - offset_int offrng[2]; - if (tree ret = gimple_call_return_array (stmt, offrng, - &past_end, rvals)) - { - if (!compute_objsize_r (ret, ostype, pref, snlim, qry)) - return false; - - /* Cap OFFRNG[1] to at most the remaining size of - the object. */ - offset_int remrng[2]; - remrng[1] = pref->size_remaining (remrng); - if (remrng[1] != 0 && !past_end) - /* Decrement the size for functions that never return - a past-the-end pointer. 
*/ - remrng[1] -= 1; - - if (remrng[1] < offrng[1]) - offrng[1] = remrng[1]; - pref->add_offset (offrng[0], offrng[1]); - } - else - { - /* For other calls that might return arbitrary pointers - including into the middle of objects set the size - range to maximum, clear PREF->BASE0, and also set - PREF->REF to include in diagnostics. */ - pref->set_max_size_range (); - pref->base0 = false; - pref->ref = ptr; - } - } - qry->put_ref (ptr, *pref); - return true; - } + /* Check that we can use store by pieces for the maximum store count + we may issue (initial fixed-size block, plus conditional + power-of-two-sized from max_bits to ctz_len. */ + unsigned HOST_WIDE_INT xlenest = blksize; + if (max_bits >= 0) + xlenest += ((HOST_WIDE_INT_1U << max_bits) * 2 + - (HOST_WIDE_INT_1U << ctz_len)); + if (!can_store_by_pieces (xlenest, builtin_memset_read_str, + &valc, align, true)) + return false; - if (gimple_nop_p (stmt)) - { - /* For a function argument try to determine the byte size - of the array from the current function declaratation - (e.g., attribute access or related). */ - wide_int wr[2]; - bool static_array = false; - if (tree ref = gimple_parm_array_size (ptr, wr, &static_array)) - { - pref->parmarray = !static_array; - pref->sizrng[0] = offset_int::from (wr[0], UNSIGNED); - pref->sizrng[1] = offset_int::from (wr[1], UNSIGNED); - pref->ref = ref; - qry->put_ref (ptr, *pref); - return true; - } + rtx (*constfun) (void *, void *, HOST_WIDE_INT, scalar_int_mode); + void *constfundata; + if (val) + { + constfun = builtin_memset_gen_str; + constfundata = val = force_reg (TYPE_MODE (unsigned_char_type_node), + val); + } + else + { + constfun = builtin_memset_read_str; + constfundata = &valc; + } - pref->set_max_size_range (); - pref->base0 = false; - pref->ref = ptr; - qry->put_ref (ptr, *pref); - return true; - } + rtx ptr = copy_addr_to_reg (convert_to_mode (ptr_mode, XEXP (to, 0), 0)); + rtx rem = copy_to_mode_reg (ptr_mode, convert_to_mode (ptr_mode, len, 0)); + to = replace_equiv_address (to, ptr); + set_mem_align (to, align); - if (gimple_code (stmt) == GIMPLE_PHI) - { - pref->ref = ptr; - access_ref phi_ref = *pref; - if (!pref->get_ref (NULL, &phi_ref, ostype, &snlim, qry)) - return false; - *pref = phi_ref; - pref->ref = ptr; - qry->put_ref (ptr, *pref); - return true; - } + if (blksize) + { + to = store_by_pieces (to, blksize, + constfun, constfundata, + align, true, + max_len != 0 ? RETURN_END : RETURN_BEGIN); + if (max_len == 0) + return true; - if (!is_gimple_assign (stmt)) - { - /* Clear BASE0 since the assigned pointer might point into - the middle of the object, set the maximum size range and, - if the SSA_NAME refers to a function argumnent, set - PREF->REF to it. */ - pref->base0 = false; - pref->set_max_size_range (); - pref->ref = ptr; - return true; - } + /* Adjust PTR, TO and REM. Since TO's address is likely + PTR+offset, we have to replace it. */ + emit_move_insn (ptr, force_operand (XEXP (to, 0), NULL_RTX)); + to = replace_equiv_address (to, ptr); + rtx rem_minus_blksize = plus_constant (ptr_mode, rem, -blksize); + emit_move_insn (rem, force_operand (rem_minus_blksize, NULL_RTX)); + } - tree_code code = gimple_assign_rhs_code (stmt); + /* Iterate over power-of-two block sizes from the maximum length to + the least significant bit possibly set in the length. 
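+
+     A worked example (values assumed for illustration): for
+     MIN_LEN = 8, MAX_LEN = 15 and CTZ_LEN = 0, max_bits is 3 and
+     tst_bits is floor_log2 (15 ^ 8) = 2, so the loop issues one
+     unconditional 8-byte store (bit 3 is set in every possible
+     length) followed by 4-, 2- and 1-byte stores, each guarded by
+     a test on the remaining length.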
*/ + for (int i = max_bits; i >= sctz_len; i--) + { + rtx_code_label *label = NULL; + blksize = HOST_WIDE_INT_1U << i; - if (code == MAX_EXPR || code == MIN_EXPR) + /* If we're past the bits shared between min_ and max_len, expand + a test on the dynamic length, comparing it with the + BLKSIZE. */ + if (i <= tst_bits) { - if (!handle_min_max_size (stmt, ostype, pref, snlim, qry)) - return false; - qry->put_ref (ptr, *pref); - return true; + label = gen_label_rtx (); + emit_cmp_and_jump_insns (rem, GEN_INT (blksize), LT, NULL, + ptr_mode, 1, label, + profile_probability::even ()); } + /* If we are at a bit that is in the prefix shared by min_ and + max_len, skip this BLKSIZE if the bit is clear. */ + else if ((max_len & blksize) == 0) + continue; - tree rhs = gimple_assign_rhs1 (stmt); + /* Issue a store of BLKSIZE bytes. */ + to = store_by_pieces (to, blksize, + constfun, constfundata, + align, true, + i != sctz_len ? RETURN_END : RETURN_BEGIN); - if (code == ASSERT_EXPR) + /* Adjust REM and PTR, unless this is the last iteration. */ + if (i != sctz_len) { - rhs = TREE_OPERAND (rhs, 0); - return compute_objsize_r (rhs, ostype, pref, snlim, qry); + emit_move_insn (ptr, force_operand (XEXP (to, 0), NULL_RTX)); + to = replace_equiv_address (to, ptr); + rtx rem_minus_blksize = plus_constant (ptr_mode, rem, -blksize); + emit_move_insn (rem, force_operand (rem_minus_blksize, NULL_RTX)); } - if (code == POINTER_PLUS_EXPR - && TREE_CODE (TREE_TYPE (rhs)) == POINTER_TYPE) + if (label) { - /* Compute the size of the object first. */ - if (!compute_objsize_r (rhs, ostype, pref, snlim, qry)) - return false; - - offset_int orng[2]; - tree off = gimple_assign_rhs2 (stmt); - if (get_offset_range (off, stmt, orng, rvals)) - pref->add_offset (orng[0], orng[1]); - else - pref->add_max_offset (); - qry->put_ref (ptr, *pref); - return true; - } - - if (code == ADDR_EXPR - || code == SSA_NAME) - return compute_objsize_r (rhs, ostype, pref, snlim, qry); + emit_label (label); - /* (This could also be an assignment from a nonlocal pointer.) Save - PTR to mention in diagnostics but otherwise treat it as a pointer - to an unknown object. */ - pref->ref = rhs; - pref->base0 = false; - pref->set_max_size_range (); - return true; + /* Given conditional stores, the offset can no longer be + known, so clear it. */ + clear_mem_offset (to); + } } - /* Assume all other expressions point into an unknown object - of the maximum valid size. */ - pref->ref = ptr; - pref->base0 = false; - pref->set_max_size_range (); - if (TREE_CODE (ptr) == SSA_NAME) - qry->put_ref (ptr, *pref); return true; } -/* A "public" wrapper around the above. Clients should use this overload - instead. */ +/* Helper function to do the actual work for expand_builtin_memset. The + arguments to the builtin_memset call DEST, VAL, and LEN are broken out + so that this can also be called without constructing an actual CALL_EXPR. + The other arguments and return value are the same as for + expand_builtin_memset. 
*/ -tree -compute_objsize (tree ptr, int ostype, access_ref *pref, - range_query *rvals /* = NULL */) +static rtx +expand_builtin_memset_args (tree dest, tree val, tree len, + rtx target, machine_mode mode, tree orig_exp) { - pointer_query qry; - qry.rvals = rvals; - ssa_name_limit_t snlim; - if (!compute_objsize_r (ptr, ostype, pref, snlim, &qry)) - return NULL_TREE; - - offset_int maxsize = pref->size_remaining (); - if (pref->base0 && pref->offrng[0] < 0 && pref->offrng[1] >= 0) - pref->offrng[0] = 0; - return wide_int_to_tree (sizetype, maxsize); -} + tree fndecl, fn; + enum built_in_function fcode; + machine_mode val_mode; + char c; + unsigned int dest_align; + rtx dest_mem, dest_addr, len_rtx; + HOST_WIDE_INT expected_size = -1; + unsigned int expected_align = 0; + unsigned HOST_WIDE_INT min_size; + unsigned HOST_WIDE_INT max_size; + unsigned HOST_WIDE_INT probable_max_size; -/* Transitional wrapper. The function should be removed once callers - transition to the pointer_query API. */ + dest_align = get_pointer_alignment (dest); -tree -compute_objsize (tree ptr, int ostype, access_ref *pref, pointer_query *ptr_qry) -{ - pointer_query qry; - if (ptr_qry) - ptr_qry->depth = 0; - else - ptr_qry = &qry; + /* If DEST is not a pointer type, don't do this operation in-line. */ + if (dest_align == 0) + return NULL_RTX; - ssa_name_limit_t snlim; - if (!compute_objsize_r (ptr, ostype, pref, snlim, ptr_qry)) - return NULL_TREE; + if (currently_expanding_gimple_stmt) + stringop_block_profile (currently_expanding_gimple_stmt, + &expected_align, &expected_size); - offset_int maxsize = pref->size_remaining (); - if (pref->base0 && pref->offrng[0] < 0 && pref->offrng[1] >= 0) - pref->offrng[0] = 0; - return wide_int_to_tree (sizetype, maxsize); -} + if (expected_align < dest_align) + expected_align = dest_align; -/* Legacy wrapper around the above. The function should be removed - once callers transition to one of the two above. */ + /* If the LEN parameter is zero, return DEST. */ + if (integer_zerop (len)) + { + /* Evaluate and ignore VAL in case it has side-effects. */ + expand_expr (val, const0_rtx, VOIDmode, EXPAND_NORMAL); + return expand_expr (dest, target, mode, EXPAND_NORMAL); + } -tree -compute_objsize (tree ptr, int ostype, tree *pdecl /* = NULL */, - tree *poff /* = NULL */, range_query *rvals /* = NULL */) -{ - /* Set the initial offsets to zero and size to negative to indicate - none has been computed yet. */ - access_ref ref; - tree size = compute_objsize (ptr, ostype, &ref, rvals); - if (!size || !ref.base0) - return NULL_TREE; + /* Stabilize the arguments in case we fail. */ + dest = builtin_save_expr (dest); + val = builtin_save_expr (val); + len = builtin_save_expr (len); - if (pdecl) - *pdecl = ref.ref; + len_rtx = expand_normal (len); + determine_block_size (len, len_rtx, &min_size, &max_size, + &probable_max_size); + dest_mem = get_memory_rtx (dest, len); + val_mode = TYPE_MODE (unsigned_char_type_node); - if (poff) - *poff = wide_int_to_tree (ptrdiff_type_node, ref.offrng[ref.offrng[0] < 0]); + if (TREE_CODE (val) != INTEGER_CST + || target_char_cast (val, &c)) + { + rtx val_rtx; - return size; -} + val_rtx = expand_normal (val); + val_rtx = convert_to_mode (val_mode, val_rtx, 0); -/* Helper to determine and check the sizes of the source and the destination - of calls to __builtin_{bzero,memcpy,mempcpy,memset} calls. EXP is the - call expression, DEST is the destination argument, SRC is the source - argument or null, and LEN is the number of bytes. 
Use Object Size type-0 - regardless of the OPT_Wstringop_overflow_ setting. Return true on success - (no overflow or invalid sizes), false otherwise. */ + /* Assume that we can memset by pieces if we can store + * the coefficients by pieces (in the required modes). + * We can't pass builtin_memset_gen_str as that emits RTL. */ + c = 1; + if (tree_fits_uhwi_p (len) + && can_store_by_pieces (tree_to_uhwi (len), + builtin_memset_read_str, &c, dest_align, + true)) + { + val_rtx = force_reg (val_mode, val_rtx); + store_by_pieces (dest_mem, tree_to_uhwi (len), + builtin_memset_gen_str, val_rtx, dest_align, + true, RETURN_BEGIN); + } + else if (!set_storage_via_setmem (dest_mem, len_rtx, val_rtx, + dest_align, expected_align, + expected_size, min_size, max_size, + probable_max_size) + && !try_store_by_multiple_pieces (dest_mem, len_rtx, + tree_ctz (len), + min_size, max_size, + val_rtx, 0, + dest_align)) + goto do_libcall; -static bool -check_memop_access (tree exp, tree dest, tree src, tree size) -{ - /* For functions like memset and memcpy that operate on raw memory - try to determine the size of the largest source and destination - object using type-0 Object Size regardless of the object size - type specified by the option. */ - access_data data (exp, access_read_write); - tree srcsize = src ? compute_objsize (src, 0, &data.src) : NULL_TREE; - tree dstsize = compute_objsize (dest, 0, &data.dst); + dest_mem = force_operand (XEXP (dest_mem, 0), NULL_RTX); + dest_mem = convert_memory_address (ptr_mode, dest_mem); + return dest_mem; + } - return check_access (exp, size, /*maxread=*/NULL_TREE, - srcsize, dstsize, data.mode, &data); -} + if (c) + { + if (tree_fits_uhwi_p (len) + && can_store_by_pieces (tree_to_uhwi (len), + builtin_memset_read_str, &c, dest_align, + true)) + store_by_pieces (dest_mem, tree_to_uhwi (len), + builtin_memset_read_str, &c, dest_align, true, + RETURN_BEGIN); + else if (!set_storage_via_setmem (dest_mem, len_rtx, + gen_int_mode (c, val_mode), + dest_align, expected_align, + expected_size, min_size, max_size, + probable_max_size) + && !try_store_by_multiple_pieces (dest_mem, len_rtx, + tree_ctz (len), + min_size, max_size, + NULL_RTX, c, + dest_align)) + goto do_libcall; -/* Validate memchr arguments without performing any expansion. - Return NULL_RTX. */ + dest_mem = force_operand (XEXP (dest_mem, 0), NULL_RTX); + dest_mem = convert_memory_address (ptr_mode, dest_mem); + return dest_mem; + } -static rtx -expand_builtin_memchr (tree exp, rtx) -{ - if (!validate_arglist (exp, - POINTER_TYPE, INTEGER_TYPE, INTEGER_TYPE, VOID_TYPE)) - return NULL_RTX; + set_mem_align (dest_mem, dest_align); + dest_addr = clear_storage_hints (dest_mem, len_rtx, + CALL_EXPR_TAILCALL (orig_exp) + ? 
BLOCK_OP_TAILCALL : BLOCK_OP_NORMAL, + expected_align, expected_size, + min_size, max_size, + probable_max_size, tree_ctz (len)); - tree arg1 = CALL_EXPR_ARG (exp, 0); - tree len = CALL_EXPR_ARG (exp, 2); + if (dest_addr == 0) + { + dest_addr = force_operand (XEXP (dest_mem, 0), NULL_RTX); + dest_addr = convert_memory_address (ptr_mode, dest_addr); + } - check_read_access (exp, arg1, len, 0); + return dest_addr; - return NULL_RTX; + do_libcall: + fndecl = get_callee_fndecl (orig_exp); + fcode = DECL_FUNCTION_CODE (fndecl); + if (fcode == BUILT_IN_MEMSET) + fn = build_call_nofold_loc (EXPR_LOCATION (orig_exp), fndecl, 3, + dest, val, len); + else if (fcode == BUILT_IN_BZERO) + fn = build_call_nofold_loc (EXPR_LOCATION (orig_exp), fndecl, 2, + dest, len); + else + gcc_unreachable (); + gcc_assert (TREE_CODE (fn) == CALL_EXPR); + CALL_EXPR_TAILCALL (fn) = CALL_EXPR_TAILCALL (orig_exp); + return expand_call (fn, target, target == const0_rtx); } -/* Expand a call EXP to the memcpy builtin. - Return NULL_RTX if we failed, the caller should emit a normal call, - otherwise try to get the result in TARGET, if convenient (and in - mode MODE if that's convenient). */ +/* Expand expression EXP, which is a call to the bzero builtin. Return + NULL_RTX if we failed the caller should emit a normal call. */ static rtx -expand_builtin_memcpy (tree exp, rtx target) +expand_builtin_bzero (tree exp) { - if (!validate_arglist (exp, - POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) + if (!validate_arglist (exp, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) return NULL_RTX; tree dest = CALL_EXPR_ARG (exp, 0); - tree src = CALL_EXPR_ARG (exp, 1); - tree len = CALL_EXPR_ARG (exp, 2); + tree size = CALL_EXPR_ARG (exp, 1); - check_memop_access (exp, dest, src, len); + check_memop_access (exp, dest, NULL_TREE, size); - return expand_builtin_memory_copy_args (dest, src, len, target, exp, - /*retmode=*/ RETURN_BEGIN, false); + /* New argument list transforming bzero(ptr x, int y) to + memset(ptr x, int 0, size_t y). This is done this way + so that if it isn't expanded inline, we fallback to + calling bzero instead of memset. */ + + location_t loc = EXPR_LOCATION (exp); + + return expand_builtin_memset_args (dest, integer_zero_node, + fold_convert_loc (loc, + size_type_node, size), + const0_rtx, VOIDmode, exp); } -/* Check a call EXP to the memmove built-in for validity. - Return NULL_RTX on both success and failure. */ +/* Try to expand cmpstr operation ICODE with the given operands. + Return the result rtx on success, otherwise return null. 
*/ static rtx -expand_builtin_memmove (tree exp, rtx target) +expand_cmpstr (insn_code icode, rtx target, rtx arg1_rtx, rtx arg2_rtx, + HOST_WIDE_INT align) { - if (!validate_arglist (exp, - POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) - return NULL_RTX; - - tree dest = CALL_EXPR_ARG (exp, 0); - tree src = CALL_EXPR_ARG (exp, 1); - tree len = CALL_EXPR_ARG (exp, 2); + machine_mode insn_mode = insn_data[icode].operand[0].mode; - check_memop_access (exp, dest, src, len); + if (target && (!REG_P (target) || HARD_REGISTER_P (target))) + target = NULL_RTX; - return expand_builtin_memory_copy_args (dest, src, len, target, exp, - /*retmode=*/ RETURN_BEGIN, true); + class expand_operand ops[4]; + create_output_operand (&ops[0], target, insn_mode); + create_fixed_operand (&ops[1], arg1_rtx); + create_fixed_operand (&ops[2], arg2_rtx); + create_integer_operand (&ops[3], align); + if (maybe_expand_insn (icode, 4, ops)) + return ops[0].value; + return NULL_RTX; } -/* Expand a call EXP to the mempcpy builtin. - Return NULL_RTX if we failed; the caller should emit a normal call, - otherwise try to get the result in TARGET, if convenient (and in - mode MODE if that's convenient). */ +/* Expand expression EXP, which is a call to the memcmp built-in function. + Return NULL_RTX if we failed and the caller should emit a normal call, + otherwise try to get the result in TARGET, if convenient. + RESULT_EQ is true if we can relax the returned value to be either zero + or nonzero, without caring about the sign. */ static rtx -expand_builtin_mempcpy (tree exp, rtx target) +expand_builtin_memcmp (tree exp, rtx target, bool result_eq) { if (!validate_arglist (exp, POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) return NULL_RTX; - tree dest = CALL_EXPR_ARG (exp, 0); - tree src = CALL_EXPR_ARG (exp, 1); + tree arg1 = CALL_EXPR_ARG (exp, 0); + tree arg2 = CALL_EXPR_ARG (exp, 1); tree len = CALL_EXPR_ARG (exp, 2); - /* Policy does not generally allow using compute_objsize (which - is used internally by check_memop_size) to change code generation - or drive optimization decisions. - - In this instance it is safe because the code we generate has - the same semantics regardless of the return value of - check_memop_sizes. Exactly the same amount of data is copied - and the return value is exactly the same in both cases. - - Furthermore, check_memop_size always uses mode 0 for the call to - compute_objsize, so the imprecise nature of compute_objsize is - avoided. */ - - /* Avoid expanding mempcpy into memcpy when the call is determined - to overflow the buffer. This also prevents the same overflow - from being diagnosed again when expanding memcpy. */ - if (!check_memop_access (exp, dest, src, len)) + /* Diagnose calls where the specified length exceeds the size of either + object. */ + if (!check_read_access (exp, arg1, len, 0) + || !check_read_access (exp, arg2, len, 0)) return NULL_RTX; - return expand_builtin_mempcpy_args (dest, src, len, - target, exp, /*retmode=*/ RETURN_END); -} - -/* Helper function to do the actual work for expand of memory copy family - functions (memcpy, mempcpy, stpcpy). Expansing should assign LEN bytes - of memory from SRC to DEST and assign to TARGET if convenient. Return - value is based on RETMODE argument. */ + /* Due to the performance benefit, always inline the calls first + when result_eq is false. 
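+
+     (Illustrative: RESULT_EQ is false for an ordered use such as
+     memcmp (a, b, n) < 0, where the sign of the result matters, and
+     true for a pure equality test such as memcmp (a, b, n) == 0,
+     which admits cheaper expansions; A, B and N are hypothetical.)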
*/ + rtx result = NULL_RTX; + enum built_in_function fcode = DECL_FUNCTION_CODE (get_callee_fndecl (exp)); + if (!result_eq && fcode != BUILT_IN_BCMP) + { + result = inline_expand_builtin_bytecmp (exp, target); + if (result) + return result; + } -static rtx -expand_builtin_memory_copy_args (tree dest, tree src, tree len, - rtx target, tree exp, memop_ret retmode, - bool might_overlap) -{ - unsigned int src_align = get_pointer_alignment (src); - unsigned int dest_align = get_pointer_alignment (dest); - rtx dest_mem, src_mem, dest_addr, len_rtx; - HOST_WIDE_INT expected_size = -1; - unsigned int expected_align = 0; - unsigned HOST_WIDE_INT min_size; - unsigned HOST_WIDE_INT max_size; - unsigned HOST_WIDE_INT probable_max_size; + machine_mode mode = TYPE_MODE (TREE_TYPE (exp)); + location_t loc = EXPR_LOCATION (exp); - bool is_move_done; + unsigned int arg1_align = get_pointer_alignment (arg1) / BITS_PER_UNIT; + unsigned int arg2_align = get_pointer_alignment (arg2) / BITS_PER_UNIT; - /* If DEST is not a pointer type, call the normal function. */ - if (dest_align == 0) + /* If we don't have POINTER_TYPE, call the function. */ + if (arg1_align == 0 || arg2_align == 0) return NULL_RTX; - /* If either SRC is not a pointer type, don't do this - operation in-line. */ - if (src_align == 0) - return NULL_RTX; + rtx arg1_rtx = get_memory_rtx (arg1, len); + rtx arg2_rtx = get_memory_rtx (arg2, len); + rtx len_rtx = expand_normal (fold_convert_loc (loc, sizetype, len)); - if (currently_expanding_gimple_stmt) - stringop_block_profile (currently_expanding_gimple_stmt, - &expected_align, &expected_size); + /* Set MEM_SIZE as appropriate. */ + if (CONST_INT_P (len_rtx)) + { + set_mem_size (arg1_rtx, INTVAL (len_rtx)); + set_mem_size (arg2_rtx, INTVAL (len_rtx)); + } - if (expected_align < dest_align) - expected_align = dest_align; - dest_mem = get_memory_rtx (dest, len); - set_mem_align (dest_mem, dest_align); - len_rtx = expand_normal (len); - determine_block_size (len, len_rtx, &min_size, &max_size, - &probable_max_size); + by_pieces_constfn constfn = NULL; - /* Try to get the byte representation of the constant SRC points to, - with its byte size in NBYTES. */ + /* Try to get the byte representation of the constant ARG2 (or, only + when the function's result is used for equality to zero, ARG1) + points to, with its byte size in NBYTES. */ unsigned HOST_WIDE_INT nbytes; - const char *rep = getbyterep (src, &nbytes); + const char *rep = getbyterep (arg2, &nbytes); + if (result_eq && rep == NULL) + { + /* For equality to zero the arguments are interchangeable. */ + rep = getbyterep (arg1, &nbytes); + if (rep != NULL) + std::swap (arg1_rtx, arg2_rtx); + } /* If the function's constant bound LEN_RTX is less than or equal to the byte size of the representation of the constant argument, and if block move would be done by pieces, we can avoid loading - the bytes from memory and only store the computed constant. - This works in the overlap (memmove) case as well because - store_by_pieces just generates a series of stores of constants - from the representation returned by getbyterep(). */ + the bytes from memory and only store the computed constant result. 
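+
+     For instance (a hypothetical call), in memcmp (p, "abcd", 4) == 0
+     the four bytes of "abcd" are known at compile time, so the
+     comparison can be emitted against those constant bytes directly
+     instead of first loading them from memory.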
*/ if (rep && CONST_INT_P (len_rtx) - && (unsigned HOST_WIDE_INT) INTVAL (len_rtx) <= nbytes - && can_store_by_pieces (INTVAL (len_rtx), builtin_memcpy_read_str, - CONST_CAST (char *, rep), - dest_align, false)) - { - dest_mem = store_by_pieces (dest_mem, INTVAL (len_rtx), - builtin_memcpy_read_str, - CONST_CAST (char *, rep), - dest_align, false, retmode); - dest_mem = force_operand (XEXP (dest_mem, 0), target); - dest_mem = convert_memory_address (ptr_mode, dest_mem); - return dest_mem; - } + && (unsigned HOST_WIDE_INT) INTVAL (len_rtx) <= nbytes) + constfn = builtin_memcpy_read_str; - src_mem = get_memory_rtx (src, len); - set_mem_align (src_mem, src_align); + result = emit_block_cmp_hints (arg1_rtx, arg2_rtx, len_rtx, + TREE_TYPE (len), target, + result_eq, constfn, + CONST_CAST (char *, rep)); - /* Copy word part most expediently. */ - enum block_op_methods method = BLOCK_OP_NORMAL; - if (CALL_EXPR_TAILCALL (exp) - && (retmode == RETURN_BEGIN || target == const0_rtx)) - method = BLOCK_OP_TAILCALL; - bool use_mempcpy_call = (targetm.libc_has_fast_function (BUILT_IN_MEMPCPY) - && retmode == RETURN_END - && !might_overlap - && target != const0_rtx); - if (use_mempcpy_call) - method = BLOCK_OP_NO_LIBCALL_RET; - dest_addr = emit_block_move_hints (dest_mem, src_mem, len_rtx, method, - expected_align, expected_size, - min_size, max_size, probable_max_size, - use_mempcpy_call, &is_move_done, - might_overlap); + if (result) + { + /* Return the value in the proper mode for this function. */ + if (GET_MODE (result) == mode) + return result; - /* Bail out when a mempcpy call would be expanded as libcall and when - we have a target that provides a fast implementation - of mempcpy routine. */ - if (!is_move_done) - return NULL_RTX; + if (target != 0) + { + convert_move (target, result, 0); + return target; + } - if (dest_addr == pc_rtx) - return NULL_RTX; + return convert_to_mode (mode, result, 0); + } - if (dest_addr == 0) - { - dest_addr = force_operand (XEXP (dest_mem, 0), target); - dest_addr = convert_memory_address (ptr_mode, dest_addr); - } - - if (retmode != RETURN_BEGIN && target != const0_rtx) - { - dest_addr = gen_rtx_PLUS (ptr_mode, dest_addr, len_rtx); - /* stpcpy pointer to last byte. */ - if (retmode == RETURN_END_MINUS_ONE) - dest_addr = gen_rtx_MINUS (ptr_mode, dest_addr, const1_rtx); - } - - return dest_addr; -} - -static rtx -expand_builtin_mempcpy_args (tree dest, tree src, tree len, - rtx target, tree orig_exp, memop_ret retmode) -{ - return expand_builtin_memory_copy_args (dest, src, len, target, orig_exp, - retmode, false); + return NULL_RTX; } -/* Expand into a movstr instruction, if one is available. Return NULL_RTX if - we failed, the caller should emit a normal call, otherwise try to - get the result in TARGET, if convenient. - Return value is based on RETMODE argument. */ +/* Expand expression EXP, which is a call to the strcmp builtin. Return NULL_RTX + if we failed the caller should emit a normal call, otherwise try to get + the result in TARGET, if convenient. 
*/ static rtx -expand_movstr (tree dest, tree src, rtx target, memop_ret retmode) +expand_builtin_strcmp (tree exp, ATTRIBUTE_UNUSED rtx target) { - class expand_operand ops[3]; - rtx dest_mem; - rtx src_mem; - - if (!targetm.have_movstr ()) + if (!validate_arglist (exp, POINTER_TYPE, POINTER_TYPE, VOID_TYPE)) return NULL_RTX; - dest_mem = get_memory_rtx (dest, NULL); - src_mem = get_memory_rtx (src, NULL); - if (retmode == RETURN_BEGIN) - { - target = force_reg (Pmode, XEXP (dest_mem, 0)); - dest_mem = replace_equiv_address (dest_mem, target); - } + tree arg1 = CALL_EXPR_ARG (exp, 0); + tree arg2 = CALL_EXPR_ARG (exp, 1); - create_output_operand (&ops[0], - retmode != RETURN_BEGIN ? target : NULL_RTX, Pmode); - create_fixed_operand (&ops[1], dest_mem); - create_fixed_operand (&ops[2], src_mem); - if (!maybe_expand_insn (targetm.code_for_movstr, 3, ops)) + if (!check_read_access (exp, arg1) + || !check_read_access (exp, arg2)) return NULL_RTX; - if (retmode != RETURN_BEGIN && target != const0_rtx) - { - target = ops[0].value; - /* movstr is supposed to set end to the address of the NUL - terminator. If the caller requested a mempcpy-like return value, - adjust it. */ - if (retmode == RETURN_END) - { - rtx tem = plus_constant (GET_MODE (target), - gen_lowpart (GET_MODE (target), target), 1); - emit_move_insn (target, force_operand (tem, NULL_RTX)); - } - } - return target; -} + /* Due to the performance benefit, always inline the calls first. */ + rtx result = NULL_RTX; + result = inline_expand_builtin_bytecmp (exp, target); + if (result) + return result; -/* Do some very basic size validation of a call to the strcpy builtin - given by EXP. Return NULL_RTX to have the built-in expand to a call - to the library function. */ + insn_code cmpstr_icode = direct_optab_handler (cmpstr_optab, SImode); + insn_code cmpstrn_icode = direct_optab_handler (cmpstrn_optab, SImode); + if (cmpstr_icode == CODE_FOR_nothing && cmpstrn_icode == CODE_FOR_nothing) + return NULL_RTX; -static rtx -expand_builtin_strcat (tree exp) -{ - if (!validate_arglist (exp, POINTER_TYPE, POINTER_TYPE, VOID_TYPE) - || !warn_stringop_overflow) + unsigned int arg1_align = get_pointer_alignment (arg1) / BITS_PER_UNIT; + unsigned int arg2_align = get_pointer_alignment (arg2) / BITS_PER_UNIT; + + /* If we don't have POINTER_TYPE, call the function. */ + if (arg1_align == 0 || arg2_align == 0) return NULL_RTX; - tree dest = CALL_EXPR_ARG (exp, 0); - tree src = CALL_EXPR_ARG (exp, 1); + /* Stabilize the arguments in case gen_cmpstr(n)si fail. */ + arg1 = builtin_save_expr (arg1); + arg2 = builtin_save_expr (arg2); - /* There is no way here to determine the length of the string in - the destination to which the SRC string is being appended so - just diagnose cases when the souce string is longer than - the destination object. */ - access_data data (exp, access_read_write, NULL_TREE, true, - NULL_TREE, true); - const int ost = warn_stringop_overflow ? warn_stringop_overflow - 1 : 1; - compute_objsize (src, ost, &data.src); - tree destsize = compute_objsize (dest, ost, &data.dst); + rtx arg1_rtx = get_memory_rtx (arg1, NULL); + rtx arg2_rtx = get_memory_rtx (arg2, NULL); - check_access (exp, /*dstwrite=*/NULL_TREE, /*maxread=*/NULL_TREE, - src, destsize, data.mode, &data); + /* Try to call cmpstrsi. */ + if (cmpstr_icode != CODE_FOR_nothing) + result = expand_cmpstr (cmpstr_icode, target, arg1_rtx, arg2_rtx, + MIN (arg1_align, arg2_align)); - return NULL_RTX; -} + /* Try to determine at least one length and call cmpstrnsi. 
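+
+     E.g. (a hypothetical call), for strcmp (s, "hi") only the second
+     length is known: len2 = strlen ("hi") + 1 = 3, so the comparison
+     can be bounded at three bytes; no conforming comparison inspects
+     bytes past the terminating nul of the shorter string.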
*/ + if (!result && cmpstrn_icode != CODE_FOR_nothing) + { + tree len; + rtx arg3_rtx; -/* Expand expression EXP, which is a call to the strcpy builtin. Return - NULL_RTX if we failed the caller should emit a normal call, otherwise - try to get the result in TARGET, if convenient (and in mode MODE if that's - convenient). */ + tree len1 = c_strlen (arg1, 1); + tree len2 = c_strlen (arg2, 1); -static rtx -expand_builtin_strcpy (tree exp, rtx target) -{ - if (!validate_arglist (exp, POINTER_TYPE, POINTER_TYPE, VOID_TYPE)) - return NULL_RTX; + if (len1) + len1 = size_binop (PLUS_EXPR, ssize_int (1), len1); + if (len2) + len2 = size_binop (PLUS_EXPR, ssize_int (1), len2); - tree dest = CALL_EXPR_ARG (exp, 0); - tree src = CALL_EXPR_ARG (exp, 1); + /* If we don't have a constant length for the first, use the length + of the second, if we know it. We don't require a constant for + this case; some cost analysis could be done if both are available + but neither is constant. For now, assume they're equally cheap, + unless one has side effects. If both strings have constant lengths, + use the smaller. */ - if (warn_stringop_overflow) - { - access_data data (exp, access_read_write, NULL_TREE, true, - NULL_TREE, true); - const int ost = warn_stringop_overflow ? warn_stringop_overflow - 1 : 1; - compute_objsize (src, ost, &data.src); - tree dstsize = compute_objsize (dest, ost, &data.dst); - check_access (exp, /*dstwrite=*/ NULL_TREE, - /*maxread=*/ NULL_TREE, /*srcstr=*/ src, - dstsize, data.mode, &data); + if (!len1) + len = len2; + else if (!len2) + len = len1; + else if (TREE_SIDE_EFFECTS (len1)) + len = len2; + else if (TREE_SIDE_EFFECTS (len2)) + len = len1; + else if (TREE_CODE (len1) != INTEGER_CST) + len = len2; + else if (TREE_CODE (len2) != INTEGER_CST) + len = len1; + else if (tree_int_cst_lt (len1, len2)) + len = len1; + else + len = len2; + + /* If both arguments have side effects, we cannot optimize. */ + if (len && !TREE_SIDE_EFFECTS (len)) + { + arg3_rtx = expand_normal (len); + result = expand_cmpstrn_or_cmpmem + (cmpstrn_icode, target, arg1_rtx, arg2_rtx, TREE_TYPE (len), + arg3_rtx, MIN (arg1_align, arg2_align)); + } } - if (rtx ret = expand_builtin_strcpy_args (exp, dest, src, target)) + tree fndecl = get_callee_fndecl (exp); + if (result) { /* Check to see if the argument was declared attribute nonstring and if so, issue a warning since at this point it's not known to be nul-terminated. */ - tree fndecl = get_callee_fndecl (exp); maybe_warn_nonstring_arg (fndecl, exp); - return ret; - } - - return NULL_RTX; -} - -/* Helper function to do the actual work for expand_builtin_strcpy. The - arguments to the builtin_strcpy call DEST and SRC are broken out - so that this can also be called without constructing an actual CALL_EXPR. - The other arguments and return value are the same as for - expand_builtin_strcpy. */ -static rtx -expand_builtin_strcpy_args (tree exp, tree dest, tree src, rtx target) -{ - /* Detect strcpy calls with unterminated arrays.. */ - tree size; - bool exact; - if (tree nonstr = unterminated_array (src, &size, &exact)) - { - /* NONSTR refers to the non-nul terminated constant array. */ - warn_string_no_nul (EXPR_LOCATION (exp), exp, NULL, src, nonstr, - size, exact); - return NULL_RTX; + /* Return the value in the proper mode for this function. 
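
The if-chain above encodes a preference order between the two candidate lengths. Restated as a stand-alone, hypothetical helper working on plain flags rather than trees (it returns which of the two lengths to use):

    /* Prefer a known length, then one without side effects, then a
       constant one; between two constants take the smaller.  */
    static int
    pick_length (int known1, int known2, int side1, int side2,
                 int cst1, int cst2, long val1, long val2)
    {
      if (!known1) return 2;
      if (!known2) return 1;
      if (side1) return 2;
      if (side2) return 1;
      if (!cst1) return 2;
      if (!cst2) return 1;
      return val1 < val2 ? 1 : 2;
    }
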
*/ + machine_mode mode = TYPE_MODE (TREE_TYPE (exp)); + if (GET_MODE (result) == mode) + return result; + if (target == 0) + return convert_to_mode (mode, result, 0); + convert_move (target, result, 0); + return target; } - return expand_movstr (dest, src, target, /*retmode=*/ RETURN_BEGIN); + /* Expand the library call ourselves using a stabilized argument + list to avoid re-evaluating the function's arguments twice. */ + tree fn = build_call_nofold_loc (EXPR_LOCATION (exp), fndecl, 2, arg1, arg2); + gcc_assert (TREE_CODE (fn) == CALL_EXPR); + CALL_EXPR_TAILCALL (fn) = CALL_EXPR_TAILCALL (exp); + return expand_call (fn, target, target == const0_rtx); } -/* Expand a call EXP to the stpcpy builtin. - Return NULL_RTX if we failed the caller should emit a normal call, - otherwise try to get the result in TARGET, if convenient (and in - mode MODE if that's convenient). */ +/* Expand expression EXP, which is a call to the strncmp builtin. Return + NULL_RTX if we failed the caller should emit a normal call, otherwise + try to get the result in TARGET, if convenient. */ static rtx -expand_builtin_stpcpy_1 (tree exp, rtx target, machine_mode mode) +expand_builtin_strncmp (tree exp, ATTRIBUTE_UNUSED rtx target, + ATTRIBUTE_UNUSED machine_mode mode) { - tree dst, src; - location_t loc = EXPR_LOCATION (exp); + if (!validate_arglist (exp, + POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) + return NULL_RTX; - if (!validate_arglist (exp, POINTER_TYPE, POINTER_TYPE, VOID_TYPE)) + tree arg1 = CALL_EXPR_ARG (exp, 0); + tree arg2 = CALL_EXPR_ARG (exp, 1); + tree arg3 = CALL_EXPR_ARG (exp, 2); + + if (!check_nul_terminated_array (exp, arg1, arg3) + || !check_nul_terminated_array (exp, arg2, arg3)) return NULL_RTX; - dst = CALL_EXPR_ARG (exp, 0); - src = CALL_EXPR_ARG (exp, 1); + location_t loc = EXPR_LOCATION (exp); + tree len1 = c_strlen (arg1, 1); + tree len2 = c_strlen (arg2, 1); - if (warn_stringop_overflow) + if (!len1 || !len2) { - access_data data (exp, access_read_write); - tree destsize = compute_objsize (dst, warn_stringop_overflow - 1, - &data.dst); - check_access (exp, /*dstwrite=*/NULL_TREE, /*maxread=*/NULL_TREE, - src, destsize, data.mode, &data); - } - - /* If return value is ignored, transform stpcpy into strcpy. */ - if (target == const0_rtx && builtin_decl_implicit (BUILT_IN_STRCPY)) - { - tree fn = builtin_decl_implicit (BUILT_IN_STRCPY); - tree result = build_call_nofold_loc (loc, fn, 2, dst, src); - return expand_expr (result, target, mode, EXPAND_NORMAL); - } - else - { - tree len, lenp1; - rtx ret; - - /* Ensure we get an actual string whose length can be evaluated at - compile-time, not an expression containing a string. This is - because the latter will potentially produce pessimized code - when used to produce the return value. */ - c_strlen_data lendata = { }; - if (!c_getstr (src) - || !(len = c_strlen (src, 0, &lendata, 1))) - return expand_movstr (dst, src, target, - /*retmode=*/ RETURN_END_MINUS_ONE); - - if (lendata.decl) - warn_string_no_nul (EXPR_LOCATION (exp), exp, NULL, src, lendata.decl); + /* Check to see if the argument was declared attribute nonstring + and if so, issue a warning since at this point it's not known + to be nul-terminated. */ + if (!maybe_warn_nonstring_arg (get_callee_fndecl (exp), exp) + && !len1 && !len2) + { + /* A strncmp read is constrained not just by the bound but + also by the length of the shorter string. Specifying + a bound that's larger than the size of either array makes + no sense and is likely a bug. 
When the length of neither
+ of the two strings is known but the sizes of both of
+ the arrays they are stored in are, issue a warning if
+ the bound is larger than the size of the larger
+ of the two arrays. */
- lenp1 = size_binop_loc (loc, PLUS_EXPR, len, ssize_int (1));
- ret = expand_builtin_mempcpy_args (dst, src, lenp1,
- target, exp,
- /*retmode=*/ RETURN_END_MINUS_ONE);
+ access_ref ref1 (arg3, true);
+ access_ref ref2 (arg3, true);
- if (ret)
- return ret;
+ tree bndrng[2] = { NULL_TREE, NULL_TREE };
+ get_size_range (arg3, bndrng, ref1.bndrng);
- if (TREE_CODE (len) == INTEGER_CST)
- {
- rtx len_rtx = expand_normal (len);
+ tree size1 = compute_objsize (arg1, 1, &ref1);
+ tree size2 = compute_objsize (arg2, 1, &ref2);
+ tree func = get_callee_fndecl (exp);
- if (CONST_INT_P (len_rtx))
+ if (size1 && size2 && bndrng[0] && !integer_zerop (bndrng[0]))
{
- ret = expand_builtin_strcpy_args (exp, dst, src, target);
-
- if (ret)
+ offset_int rem1 = ref1.size_remaining ();
+ offset_int rem2 = ref2.size_remaining ();
+ if (rem1 == 0 || rem2 == 0)
+ maybe_warn_for_bound (OPT_Wstringop_overread, loc, exp, func,
+ bndrng, integer_zero_node);
+ else
{
- if (! target)
- {
- if (mode != VOIDmode)
- target = gen_reg_rtx (mode);
- else
- target = gen_reg_rtx (GET_MODE (ret));
- }
- if (GET_MODE (target) != GET_MODE (ret))
- ret = gen_lowpart (GET_MODE (target), ret);
-
- ret = plus_constant (GET_MODE (ret), ret, INTVAL (len_rtx));
- ret = emit_move_insn (target, force_operand (ret, NULL_RTX));
- gcc_assert (ret);
-
- return target;
+ offset_int maxrem = wi::max (rem1, rem2, UNSIGNED);
+ if (maxrem < wi::to_offset (bndrng[0]))
+ maybe_warn_for_bound (OPT_Wstringop_overread, loc, exp,
+ func, bndrng,
+ wide_int_to_tree (sizetype, maxrem));
}
}
+ else if (bndrng[0]
+ && !integer_zerop (bndrng[0])
+ && ((size1 && integer_zerop (size1))
+ || (size2 && integer_zerop (size2))))
+ maybe_warn_for_bound (OPT_Wstringop_overread, loc, exp, func,
+ bndrng, integer_zero_node);
}
-
- return expand_movstr (dst, src, target,
- /*retmode=*/ RETURN_END_MINUS_ONE);
}
-}
-/* Expand a call EXP to the stpcpy builtin and diagnose uses of nonstring
- arguments while being careful to avoid duplicate warnings (which could
- be issued if the expander were to expand the call, resulting in it
- being emitted in expand_call(). */
+ /* Due to the performance benefit, always inline the calls first. */
+ rtx result = NULL_RTX;
+ result = inline_expand_builtin_bytecmp (exp, target);
+ if (result)
+ return result;
-static rtx
-expand_builtin_stpcpy (tree exp, rtx target, machine_mode mode)
-{
- if (rtx ret = expand_builtin_stpcpy_1 (exp, target, mode))
- {
- /* The call has been successfully expanded. Check for nonstring
- arguments and issue warnings as appropriate. */
- maybe_warn_nonstring_arg (get_callee_fndecl (exp), exp);
- return ret;
- }
+ /* If c_strlen can determine an expression for one of the string
+ lengths, and it doesn't have side effects, then emit cmpstrnsi
+ using length MIN(strlen(string)+1, arg3). */
+ insn_code cmpstrn_icode = direct_optab_handler (cmpstrn_optab, SImode);
+ if (cmpstrn_icode == CODE_FOR_nothing)
+ return NULL_RTX;
- return NULL_RTX;
-}
+ tree len;
-/* Check a call EXP to the stpncpy built-in for validity.
- Return NULL_RTX on both success and failure.
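
A hypothetical test case for the diagnostic above: neither argument's string length is known, both array sizes are, and the bound exceeds the larger of the two, so -Wstringop-overread should flag the call:

    #include <string.h>

    int
    cmp_too_far (void)
    {
      extern char a[4], b[5];   /* sizes known, lengths unknown */
      return strncmp (a, b, 8); /* bound 8 exceeds both arrays */
    }
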
*/ + unsigned int arg1_align = get_pointer_alignment (arg1) / BITS_PER_UNIT; + unsigned int arg2_align = get_pointer_alignment (arg2) / BITS_PER_UNIT; -static rtx -expand_builtin_stpncpy (tree exp, rtx) -{ - if (!validate_arglist (exp, - POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE) - || !warn_stringop_overflow) - return NULL_RTX; + if (len1) + len1 = size_binop_loc (loc, PLUS_EXPR, ssize_int (1), len1); + if (len2) + len2 = size_binop_loc (loc, PLUS_EXPR, ssize_int (1), len2); - /* The source and destination of the call. */ - tree dest = CALL_EXPR_ARG (exp, 0); - tree src = CALL_EXPR_ARG (exp, 1); + tree len3 = fold_convert_loc (loc, sizetype, arg3); - /* The exact number of bytes to write (not the maximum). */ - tree len = CALL_EXPR_ARG (exp, 2); - access_data data (exp, access_read_write); - /* The size of the destination object. */ - tree destsize = compute_objsize (dest, warn_stringop_overflow - 1, &data.dst); - check_access (exp, len, /*maxread=*/len, src, destsize, data.mode, &data); - return NULL_RTX; -} + /* If we don't have a constant length for the first, use the length + of the second, if we know it. If neither string is constant length, + use the given length argument. We don't require a constant for + this case; some cost analysis could be done if both are available + but neither is constant. For now, assume they're equally cheap, + unless one has side effects. If both strings have constant lengths, + use the smaller. */ -/* Callback routine for store_by_pieces. Read GET_MODE_BITSIZE (MODE) - bytes from constant string DATA + OFFSET and return it as target - constant. */ + if (!len1 && !len2) + len = len3; + else if (!len1) + len = len2; + else if (!len2) + len = len1; + else if (TREE_SIDE_EFFECTS (len1)) + len = len2; + else if (TREE_SIDE_EFFECTS (len2)) + len = len1; + else if (TREE_CODE (len1) != INTEGER_CST) + len = len2; + else if (TREE_CODE (len2) != INTEGER_CST) + len = len1; + else if (tree_int_cst_lt (len1, len2)) + len = len1; + else + len = len2; -rtx -builtin_strncpy_read_str (void *data, void *, HOST_WIDE_INT offset, - scalar_int_mode mode) -{ - const char *str = (const char *) data; + /* If we are not using the given length, we must incorporate it here. + The actual new length parameter will be MIN(len,arg3) in this case. */ + if (len != len3) + { + len = fold_convert_loc (loc, sizetype, len); + len = fold_build2_loc (loc, MIN_EXPR, TREE_TYPE (len), len, len3); + } + rtx arg1_rtx = get_memory_rtx (arg1, len); + rtx arg2_rtx = get_memory_rtx (arg2, len); + rtx arg3_rtx = expand_normal (len); + result = expand_cmpstrn_or_cmpmem (cmpstrn_icode, target, arg1_rtx, + arg2_rtx, TREE_TYPE (len), arg3_rtx, + MIN (arg1_align, arg2_align)); - if ((unsigned HOST_WIDE_INT) offset > strlen (str)) - return const0_rtx; + tree fndecl = get_callee_fndecl (exp); + if (result) + { + /* Return the value in the proper mode for this function. */ + mode = TYPE_MODE (TREE_TYPE (exp)); + if (GET_MODE (result) == mode) + return result; + if (target == 0) + return convert_to_mode (mode, result, 0); + convert_move (target, result, 0); + return target; + } - return c_readstr (str + offset, mode); + /* Expand the library call ourselves using a stabilized argument + list to avoid re-evaluating the function's arguments twice. 
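
The MIN_EXPR built above reflects a simple fact: strncmp may stop at the first nul, so when one string's length is known, at most min(strlen(s)+1, n) bytes ever need comparing. In plain C:

    #include <string.h>

    static size_t
    effective_bound (const char *known_str, size_t n)
    {
      size_t len = strlen (known_str) + 1;  /* include the nul */
      return len < n ? len : n;
    }
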
*/ + tree call = build_call_nofold_loc (loc, fndecl, 3, arg1, arg2, len); + copy_warning (call, exp); + gcc_assert (TREE_CODE (call) == CALL_EXPR); + CALL_EXPR_TAILCALL (call) = CALL_EXPR_TAILCALL (exp); + return expand_call (call, target, target == const0_rtx); } -/* Helper to check the sizes of sequences and the destination of calls - to __builtin_strncat and __builtin___strncat_chk. Returns true on - success (no overflow or invalid sizes), false otherwise. */ +/* Expand a call to __builtin_saveregs, generating the result in TARGET, + if that's convenient. */ -static bool -check_strncat_sizes (tree exp, tree objsize) +rtx +expand_builtin_saveregs (void) { - tree dest = CALL_EXPR_ARG (exp, 0); - tree src = CALL_EXPR_ARG (exp, 1); - tree maxread = CALL_EXPR_ARG (exp, 2); + rtx val; + rtx_insn *seq; - /* Try to determine the range of lengths that the source expression - refers to. */ - c_strlen_data lendata = { }; - get_range_strlen (src, &lendata, /* eltsize = */ 1); + /* Don't do __builtin_saveregs more than once in a function. + Save the result of the first call and reuse it. */ + if (saveregs_value != 0) + return saveregs_value; - /* Try to verify that the destination is big enough for the shortest - string. */ + /* When this function is called, it means that registers must be + saved on entry to this function. So we migrate the call to the + first insn of this function. */ - access_data data (exp, access_read_write, maxread, true); - if (!objsize && warn_stringop_overflow) - { - /* If it hasn't been provided by __strncat_chk, try to determine - the size of the destination object into which the source is - being copied. */ - objsize = compute_objsize (dest, warn_stringop_overflow - 1, &data.dst); - } + start_sequence (); - /* Add one for the terminating nul. */ - tree srclen = (lendata.minlen - ? fold_build2 (PLUS_EXPR, size_type_node, lendata.minlen, - size_one_node) - : NULL_TREE); + /* Do whatever the machine needs done in this case. */ + val = targetm.calls.expand_builtin_saveregs (); - /* The strncat function copies at most MAXREAD bytes and always appends - the terminating nul so the specified upper bound should never be equal - to (or greater than) the size of the destination. */ - if (tree_fits_uhwi_p (maxread) && tree_fits_uhwi_p (objsize) - && tree_int_cst_equal (objsize, maxread)) - { - location_t loc = EXPR_LOCATION (exp); - warning_at (loc, OPT_Wstringop_overflow_, - "%qD specified bound %E equals destination size", - get_callee_fndecl (exp), maxread); + seq = get_insns (); + end_sequence (); - return false; - } + saveregs_value = val; - if (!srclen - || (maxread && tree_fits_uhwi_p (maxread) - && tree_fits_uhwi_p (srclen) - && tree_int_cst_lt (maxread, srclen))) - srclen = maxread; + /* Put the insns after the NOTE that starts the function. If this + is inside a start_sequence, make the outer-level insn chain current, so + the code is placed at the start of the function. */ + push_topmost_sequence (); + emit_insn_after (seq, entry_of_function ()); + pop_topmost_sequence (); - /* The number of bytes to write is LEN but check_access will alsoa - check SRCLEN if LEN's value isn't known. */ - return check_access (exp, /*dstwrite=*/NULL_TREE, maxread, srclen, - objsize, data.mode, &data); + return val; } -/* Similar to expand_builtin_strcat, do some very basic size validation - of a call to the strcpy builtin given by EXP. Return NULL_RTX to have - the built-in expand to a call to the library function. */ +/* Expand a call to __builtin_next_arg. 
*/ static rtx -expand_builtin_strncat (tree exp, rtx) +expand_builtin_next_arg (void) { - if (!validate_arglist (exp, - POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE) - || !warn_stringop_overflow) - return NULL_RTX; + /* Checking arguments is already done in fold_builtin_next_arg + that must be called before this function. */ + return expand_binop (ptr_mode, add_optab, + crtl->args.internal_arg_pointer, + crtl->args.arg_offset_rtx, + NULL_RTX, 0, OPTAB_LIB_WIDEN); +} - tree dest = CALL_EXPR_ARG (exp, 0); - tree src = CALL_EXPR_ARG (exp, 1); - /* The upper bound on the number of bytes to write. */ - tree maxread = CALL_EXPR_ARG (exp, 2); +/* Make it easier for the backends by protecting the valist argument + from multiple evaluations. */ - /* Detect unterminated source (only). */ - if (!check_nul_terminated_array (exp, src, maxread)) - return NULL_RTX; +static tree +stabilize_va_list_loc (location_t loc, tree valist, int needs_lvalue) +{ + tree vatype = targetm.canonical_va_list_type (TREE_TYPE (valist)); - /* The length of the source sequence. */ - tree slen = c_strlen (src, 1); + /* The current way of determining the type of valist is completely + bogus. We should have the information on the va builtin instead. */ + if (!vatype) + vatype = targetm.fn_abi_va_list (cfun->decl); - /* Try to determine the range of lengths that the source expression - refers to. Since the lengths are only used for warning and not - for code generation disable strict mode below. */ - tree maxlen = slen; - if (!maxlen) + if (TREE_CODE (vatype) == ARRAY_TYPE) { - c_strlen_data lendata = { }; - get_range_strlen (src, &lendata, /* eltsize = */ 1); - maxlen = lendata.maxbound; - } + if (TREE_SIDE_EFFECTS (valist)) + valist = save_expr (valist); - access_data data (exp, access_read_write); - /* Try to verify that the destination is big enough for the shortest - string. First try to determine the size of the destination object - into which the source is being copied. */ - tree destsize = compute_objsize (dest, warn_stringop_overflow - 1, &data.dst); + /* For this case, the backends will be expecting a pointer to + vatype, but it's possible we've actually been given an array + (an actual TARGET_CANONICAL_VA_LIST_TYPE (valist)). + So fix it. */ + if (TREE_CODE (TREE_TYPE (valist)) == ARRAY_TYPE) + { + tree p1 = build_pointer_type (TREE_TYPE (vatype)); + valist = build_fold_addr_expr_with_type_loc (loc, valist, p1); + } + } + else + { + tree pt = build_pointer_type (vatype); - /* Add one for the terminating nul. */ - tree srclen = (maxlen - ? fold_build2 (PLUS_EXPR, size_type_node, maxlen, - size_one_node) - : NULL_TREE); + if (! needs_lvalue) + { + if (! TREE_SIDE_EFFECTS (valist)) + return valist; - /* The strncat function copies at most MAXREAD bytes and always appends - the terminating nul so the specified upper bound should never be equal - to (or greater than) the size of the destination. 
*/ - if (tree_fits_uhwi_p (maxread) && tree_fits_uhwi_p (destsize) - && tree_int_cst_equal (destsize, maxread)) - { - location_t loc = EXPR_LOCATION (exp); - warning_at (loc, OPT_Wstringop_overflow_, - "%qD specified bound %E equals destination size", - get_callee_fndecl (exp), maxread); + valist = fold_build1_loc (loc, ADDR_EXPR, pt, valist); + TREE_SIDE_EFFECTS (valist) = 1; + } - return NULL_RTX; + if (TREE_SIDE_EFFECTS (valist)) + valist = save_expr (valist); + valist = fold_build2_loc (loc, MEM_REF, + vatype, valist, build_int_cst (pt, 0)); } - if (!srclen - || (maxread && tree_fits_uhwi_p (maxread) - && tree_fits_uhwi_p (srclen) - && tree_int_cst_lt (maxread, srclen))) - srclen = maxread; - - check_access (exp, /*dstwrite=*/NULL_TREE, maxread, srclen, - destsize, data.mode, &data); - return NULL_RTX; + return valist; } -/* Expand expression EXP, which is a call to the strncpy builtin. Return - NULL_RTX if we failed the caller should emit a normal call. */ +/* The "standard" definition of va_list is void*. */ -static rtx -expand_builtin_strncpy (tree exp, rtx target) +tree +std_build_builtin_va_list (void) { - location_t loc = EXPR_LOCATION (exp); + return ptr_type_node; +} - if (!validate_arglist (exp, - POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) - return NULL_RTX; - tree dest = CALL_EXPR_ARG (exp, 0); - tree src = CALL_EXPR_ARG (exp, 1); - /* The number of bytes to write (not the maximum). */ - tree len = CALL_EXPR_ARG (exp, 2); +/* The "standard" abi va_list is va_list_type_node. */ - /* The length of the source sequence. */ - tree slen = c_strlen (src, 1); +tree +std_fn_abi_va_list (tree fndecl ATTRIBUTE_UNUSED) +{ + return va_list_type_node; +} - if (warn_stringop_overflow) - { - access_data data (exp, access_read_write, len, true, len, true); - const int ost = warn_stringop_overflow ? warn_stringop_overflow - 1 : 1; - compute_objsize (src, ost, &data.src); - tree dstsize = compute_objsize (dest, ost, &data.dst); - /* The number of bytes to write is LEN but check_access will also - check SLEN if LEN's value isn't known. */ - check_access (exp, /*dstwrite=*/len, - /*maxread=*/len, src, dstsize, data.mode, &data); - } +/* The "standard" type of va_list is va_list_type_node. */ - /* We must be passed a constant len and src parameter. */ - if (!tree_fits_uhwi_p (len) || !slen || !tree_fits_uhwi_p (slen)) - return NULL_RTX; +tree +std_canonical_va_list_type (tree type) +{ + tree wtype, htype; - slen = size_binop_loc (loc, PLUS_EXPR, slen, ssize_int (1)); + wtype = va_list_type_node; + htype = type; - /* We're required to pad with trailing zeros if the requested - len is greater than strlen(s2)+1. In that case try to - use store_by_pieces, if it fails, punt. */ - if (tree_int_cst_lt (slen, len)) + if (TREE_CODE (wtype) == ARRAY_TYPE) { - unsigned int dest_align = get_pointer_alignment (dest); - const char *p = c_getstr (src); - rtx dest_mem; + /* If va_list is an array type, the argument may have decayed + to a pointer type, e.g. by being passed to another function. + In that case, unwrap both types so that we can compare the + underlying records. 
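
A user-level illustration of why the unwrapping above is needed: on targets where va_list is an array type (x86-64, for instance), it decays to a pointer as soon as it is passed to another function, so both forms must name the same canonical type:

    #include <stdarg.h>

    static int
    sum_rest (va_list ap, int n)   /* sees the decayed pointer type */
    {
      int s = 0;
      while (n-- > 0)
        s += va_arg (ap, int);
      return s;
    }

    int
    sum (int n, ...)
    {
      va_list ap;                  /* array of one struct on x86-64 */
      va_start (ap, n);
      int s = sum_rest (ap, n);    /* the array decays here */
      va_end (ap);
      return s;
    }
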
 */
+ if (TREE_CODE (htype) == ARRAY_TYPE
+ || POINTER_TYPE_P (htype))
+ {
+ wtype = TREE_TYPE (wtype);
+ htype = TREE_TYPE (htype);
+ }
+ }
+ if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
+ return va_list_type_node;
- if (!p || dest_align == 0 || !tree_fits_uhwi_p (len)
- || !can_store_by_pieces (tree_to_uhwi (len),
- builtin_strncpy_read_str,
- CONST_CAST (char *, p),
- dest_align, false))
- return NULL_RTX;
+ return NULL_TREE;
+}
- dest_mem = get_memory_rtx (dest, len);
- store_by_pieces (dest_mem, tree_to_uhwi (len),
- builtin_strncpy_read_str,
- CONST_CAST (char *, p), dest_align, false,
- RETURN_BEGIN);
- dest_mem = force_operand (XEXP (dest_mem, 0), target);
- dest_mem = convert_memory_address (ptr_mode, dest_mem);
- return dest_mem;
- }
+/* The "standard" implementation of va_start: just assign `nextarg' to
+ the variable. */
- return NULL_RTX;
+void
+std_expand_builtin_va_start (tree valist, rtx nextarg)
+{
+ rtx va_r = expand_expr (valist, NULL_RTX, VOIDmode, EXPAND_WRITE);
+ convert_move (va_r, nextarg, 0);
}
-/* Callback routine for store_by_pieces. Read GET_MODE_BITSIZE (MODE)
- bytes from constant string DATA + OFFSET and return it as target
- constant. If PREV isn't nullptr, it has the RTL info from the
- previous iteration. */
+/* Expand EXP, a call to __builtin_va_start. */
-rtx
-builtin_memset_read_str (void *data, void *prevp,
- HOST_WIDE_INT offset ATTRIBUTE_UNUSED,
- scalar_int_mode mode)
+static rtx
+expand_builtin_va_start (tree exp)
{
- by_pieces_prev *prev = (by_pieces_prev *) prevp;
- if (prev != nullptr && prev->data != nullptr)
+ rtx nextarg;
+ tree valist;
+ location_t loc = EXPR_LOCATION (exp);
+
+ if (call_expr_nargs (exp) < 2)
{
- /* Use the previous data in the same mode. */
- if (prev->mode == mode)
- return prev->data;
+ error_at (loc, "too few arguments to function %<va_start%>");
+ return const0_rtx;
}
- const char *c = (const char *) data;
- char *p = XALLOCAVEC (char, GET_MODE_SIZE (mode));
+ if (fold_builtin_next_arg (exp, true))
+ return const0_rtx;
- memset (p, *c, GET_MODE_SIZE (mode));
+ nextarg = expand_builtin_next_arg ();
+ valist = stabilize_va_list_loc (loc, CALL_EXPR_ARG (exp, 0), 1);
- return c_readstr (p, mode);
+ if (targetm.expand_builtin_va_start)
+ targetm.expand_builtin_va_start (valist, nextarg);
+ else
+ std_expand_builtin_va_start (valist, nextarg);
+
+ return const0_rtx;
}
-/* Callback routine for store_by_pieces. Return the RTL of a register
- containing GET_MODE_SIZE (MODE) consecutive copies of the unsigned
- char value given in the RTL register data. For example, if mode is
- 4 bytes wide, return the RTL for 0x01010101*data. If PREV isn't
- nullptr, it has the RTL info from the previous iteration. */
+/* Expand EXP, a call to __builtin_va_end. */
static rtx
-builtin_memset_gen_str (void *data, void *prevp,
- HOST_WIDE_INT offset ATTRIBUTE_UNUSED,
- scalar_int_mode mode)
+expand_builtin_va_end (tree exp)
{
- rtx target, coeff;
- size_t size;
- char *p;
-
- by_pieces_prev *prev = (by_pieces_prev *) prevp;
- if (prev != nullptr && prev->data != nullptr)
- {
- /* Use the previous data in the same mode. */
- if (prev->mode == mode)
- return prev->data;
-
- target = simplify_gen_subreg (mode, prev->data, prev->mode, 0);
- if (target != nullptr)
- return target;
- }
-
- size = GET_MODE_SIZE (mode);
- if (size == 1)
- return (rtx) data;
+ tree valist = CALL_EXPR_ARG (exp, 0);
- p = XALLOCAVEC (char, size);
- memset (p, 1, size);
- coeff = c_readstr (p, mode);
+ /* Evaluate for side effects, if needed.
I hate macros that don't + do that. */ + if (TREE_SIDE_EFFECTS (valist)) + expand_expr (valist, const0_rtx, VOIDmode, EXPAND_NORMAL); - target = convert_to_mode (mode, (rtx) data, 1); - target = expand_mult (mode, target, coeff, NULL_RTX, 1); - return force_reg (mode, target); + return const0_rtx; } -/* Expand expression EXP, which is a call to the memset builtin. Return - NULL_RTX if we failed the caller should emit a normal call, otherwise - try to get the result in TARGET, if convenient (and in mode MODE if that's - convenient). */ +/* Expand EXP, a call to __builtin_va_copy. We do this as a + builtin rather than just as an assignment in stdarg.h because of the + nastiness of array-type va_list types. */ static rtx -expand_builtin_memset (tree exp, rtx target, machine_mode mode) +expand_builtin_va_copy (tree exp) { - if (!validate_arglist (exp, - POINTER_TYPE, INTEGER_TYPE, INTEGER_TYPE, VOID_TYPE)) - return NULL_RTX; + tree dst, src, t; + location_t loc = EXPR_LOCATION (exp); - tree dest = CALL_EXPR_ARG (exp, 0); - tree val = CALL_EXPR_ARG (exp, 1); - tree len = CALL_EXPR_ARG (exp, 2); + dst = CALL_EXPR_ARG (exp, 0); + src = CALL_EXPR_ARG (exp, 1); - check_memop_access (exp, dest, NULL_TREE, len); + dst = stabilize_va_list_loc (loc, dst, 1); + src = stabilize_va_list_loc (loc, src, 0); - return expand_builtin_memset_args (dest, val, len, target, mode, exp); -} + gcc_assert (cfun != NULL && cfun->decl != NULL_TREE); -/* Try to store VAL (or, if NULL_RTX, VALC) in LEN bytes starting at TO. - Return TRUE if successful, FALSE otherwise. TO is assumed to be - aligned at an ALIGN-bits boundary. LEN must be a multiple of - 1<decl)) != ARRAY_TYPE) + { + t = build2 (MODIFY_EXPR, targetm.fn_abi_va_list (cfun->decl), dst, src); + TREE_SIDE_EFFECTS (t) = 1; + expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); + } + else + { + rtx dstb, srcb, size; - The strategy is to issue one store_by_pieces for each power of two, - from most to least significant, guarded by a test on whether there - are at least that many bytes left to copy in LEN. + /* Evaluate to pointers. */ + dstb = expand_expr (dst, NULL_RTX, Pmode, EXPAND_NORMAL); + srcb = expand_expr (src, NULL_RTX, Pmode, EXPAND_NORMAL); + size = expand_expr (TYPE_SIZE_UNIT (targetm.fn_abi_va_list (cfun->decl)), + NULL_RTX, VOIDmode, EXPAND_NORMAL); - ??? Should we skip some powers of two in favor of loops? Maybe start - at the max of TO/LEN/word alignment, at least when optimizing for - size, instead of ensuring O(log len) dynamic compares? */ + dstb = convert_memory_address (Pmode, dstb); + srcb = convert_memory_address (Pmode, srcb); -bool -try_store_by_multiple_pieces (rtx to, rtx len, unsigned int ctz_len, - unsigned HOST_WIDE_INT min_len, - unsigned HOST_WIDE_INT max_len, - rtx val, char valc, unsigned int align) -{ - int max_bits = floor_log2 (max_len); - int min_bits = floor_log2 (min_len); - int sctz_len = ctz_len; + /* "Dereference" to BLKmode memories. */ + dstb = gen_rtx_MEM (BLKmode, dstb); + set_mem_alias_set (dstb, get_alias_set (TREE_TYPE (TREE_TYPE (dst)))); + set_mem_align (dstb, TYPE_ALIGN (targetm.fn_abi_va_list (cfun->decl))); + srcb = gen_rtx_MEM (BLKmode, srcb); + set_mem_alias_set (srcb, get_alias_set (TREE_TYPE (TREE_TYPE (src)))); + set_mem_align (srcb, TYPE_ALIGN (targetm.fn_abi_va_list (cfun->decl))); - gcc_checking_assert (sctz_len >= 0); + /* Copy. 
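
This is the source-level situation the block-copy branch above exists for: with an array-typed va_list a plain assignment would be ill-formed, which is why va_copy is a builtin in the first place:

    #include <stdarg.h>

    void
    use_twice (va_list ap0)
    {
      va_list ap1;
      va_copy (ap1, ap0);   /* block copy on array-va_list targets */
      /* ... consume ap1 independently of ap0 ... */
      va_end (ap1);
    }
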
*/ + emit_block_move (dstb, srcb, size, BLOCK_OP_NORMAL); + } - if (val) - valc = 1; + return const0_rtx; +} - /* Bits more significant than TST_BITS are part of the shared prefix - in the binary representation of both min_len and max_len. Since - they're identical, we don't need to test them in the loop. */ - int tst_bits = (max_bits != min_bits ? max_bits - : floor_log2 (max_len ^ min_len)); +/* Expand a call to one of the builtin functions __builtin_frame_address or + __builtin_return_address. */ - /* Check whether it's profitable to start by storing a fixed BLKSIZE - bytes, to lower max_bits. In the unlikely case of a constant LEN - (implied by identical MAX_LEN and MIN_LEN), we want to issue a - single store_by_pieces, but otherwise, select the minimum multiple - of the ALIGN (in bytes) and of the MCD of the possible LENs, that - brings MAX_LEN below TST_BITS, if that's lower than min_len. */ - unsigned HOST_WIDE_INT blksize; - if (max_len > min_len) +static rtx +expand_builtin_frame_address (tree fndecl, tree exp) +{ + /* The argument must be a nonnegative integer constant. + It counts the number of frames to scan up the stack. + The value is either the frame pointer value or the return + address saved in that frame. */ + if (call_expr_nargs (exp) == 0) + /* Warning about missing arg was already issued. */ + return const0_rtx; + else if (! tree_fits_uhwi_p (CALL_EXPR_ARG (exp, 0))) { - unsigned HOST_WIDE_INT alrng = MAX (HOST_WIDE_INT_1U << ctz_len, - align / BITS_PER_UNIT); - blksize = max_len - (HOST_WIDE_INT_1U << tst_bits) + alrng; - blksize &= ~(alrng - 1); + error ("invalid argument to %qD", fndecl); + return const0_rtx; } - else if (max_len == min_len) - blksize = max_len; else - gcc_unreachable (); - if (min_len >= blksize) { - min_len -= blksize; - min_bits = floor_log2 (min_len); - max_len -= blksize; - max_bits = floor_log2 (max_len); - - tst_bits = (max_bits != min_bits ? max_bits - : floor_log2 (max_len ^ min_len)); - } - else - blksize = 0; + /* Number of frames to scan up the stack. */ + unsigned HOST_WIDE_INT count = tree_to_uhwi (CALL_EXPR_ARG (exp, 0)); - /* Check that we can use store by pieces for the maximum store count - we may issue (initial fixed-size block, plus conditional - power-of-two-sized from max_bits to ctz_len. */ - unsigned HOST_WIDE_INT xlenest = blksize; - if (max_bits >= 0) - xlenest += ((HOST_WIDE_INT_1U << max_bits) * 2 - - (HOST_WIDE_INT_1U << ctz_len)); - if (!can_store_by_pieces (xlenest, builtin_memset_read_str, - &valc, align, true)) - return false; + rtx tem = expand_builtin_return_addr (DECL_FUNCTION_CODE (fndecl), count); - rtx (*constfun) (void *, void *, HOST_WIDE_INT, scalar_int_mode); - void *constfundata; - if (val) - { - constfun = builtin_memset_gen_str; - constfundata = val = force_reg (TYPE_MODE (unsigned_char_type_node), - val); - } - else - { - constfun = builtin_memset_read_str; - constfundata = &valc; - } + /* Some ports cannot access arbitrary stack frames. */ + if (tem == NULL) + { + warning (0, "unsupported argument to %qD", fndecl); + return const0_rtx; + } - rtx ptr = copy_addr_to_reg (convert_to_mode (ptr_mode, XEXP (to, 0), 0)); - rtx rem = copy_to_mode_reg (ptr_mode, convert_to_mode (ptr_mode, len, 0)); - to = replace_equiv_address (to, ptr); - set_mem_align (to, align); + if (count) + { + /* Warn since no effort is made to ensure that any frame + beyond the current one exists or can be safely reached. 
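
The warning emitted above corresponds to calls like this one; the argument must be a constant, and any value above zero walks frames the compiler cannot prove exist:

    void *
    callers_frame (void)
    {
      return __builtin_frame_address (1);  /* -Wframe-address */
    }
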
 */
+ warning (OPT_Wframe_address, "calling %qD with "
+ "a nonzero argument is unsafe", fndecl);
+ }
- if (blksize)
- {
- to = store_by_pieces (to, blksize,
- constfun, constfundata,
- align, true,
- max_len != 0 ? RETURN_END : RETURN_BEGIN);
- if (max_len == 0)
- return true;
+ /* For __builtin_frame_address, return what we've got. */
+ if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_FRAME_ADDRESS)
+ return tem;
- /* Adjust PTR, TO and REM. Since TO's address is likely
- PTR+offset, we have to replace it. */
- emit_move_insn (ptr, force_operand (XEXP (to, 0), NULL_RTX));
- to = replace_equiv_address (to, ptr);
- rtx rem_minus_blksize = plus_constant (ptr_mode, rem, -blksize);
- emit_move_insn (rem, force_operand (rem_minus_blksize, NULL_RTX));
+ if (!REG_P (tem)
+ && ! CONSTANT_P (tem))
+ tem = copy_addr_to_reg (tem);
+ return tem;
}
+}
- /* Iterate over power-of-two block sizes from the maximum length to
- the least significant bit possibly set in the length. */
- for (int i = max_bits; i >= sctz_len; i--)
- {
- rtx_code_label *label = NULL;
- blksize = HOST_WIDE_INT_1U << i;
+/* Expand EXP, a call to the alloca builtin. Return NULL_RTX if we
+ failed and the caller should emit a normal call. */
- /* If we're past the bits shared between min_ and max_len, expand
- a test on the dynamic length, comparing it with the
- BLKSIZE. */
- if (i <= tst_bits)
- {
- label = gen_label_rtx ();
- emit_cmp_and_jump_insns (rem, GEN_INT (blksize), LT, NULL,
- ptr_mode, 1, label,
- profile_probability::even ());
- }
- /* If we are at a bit that is in the prefix shared by min_ and
- max_len, skip this BLKSIZE if the bit is clear. */
- else if ((max_len & blksize) == 0)
- continue;
+static rtx
+expand_builtin_alloca (tree exp)
+{
+ rtx op0;
+ rtx result;
+ unsigned int align;
+ tree fndecl = get_callee_fndecl (exp);
+ HOST_WIDE_INT max_size;
+ enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
+ bool alloca_for_var = CALL_ALLOCA_FOR_VAR_P (exp);
+ bool valid_arglist
+ = (fcode == BUILT_IN_ALLOCA_WITH_ALIGN_AND_MAX
+ ? validate_arglist (exp, INTEGER_TYPE, INTEGER_TYPE, INTEGER_TYPE,
+ VOID_TYPE)
+ : fcode == BUILT_IN_ALLOCA_WITH_ALIGN
+ ? validate_arglist (exp, INTEGER_TYPE, INTEGER_TYPE, VOID_TYPE)
+ : validate_arglist (exp, INTEGER_TYPE, VOID_TYPE));
- /* Issue a store of BLKSIZE bytes. */
- to = store_by_pieces (to, blksize,
- constfun, constfundata,
- align, true,
- i != sctz_len ? RETURN_END : RETURN_BEGIN);
+ if (!valid_arglist)
+ return NULL_RTX;
- /* Adjust REM and PTR, unless this is the last iteration. */
- if (i != sctz_len)
- {
- emit_move_insn (ptr, force_operand (XEXP (to, 0), NULL_RTX));
- to = replace_equiv_address (to, ptr);
- rtx rem_minus_blksize = plus_constant (ptr_mode, rem, -blksize);
- emit_move_insn (rem, force_operand (rem_minus_blksize, NULL_RTX));
- }
+ if ((alloca_for_var
+ && warn_vla_limit >= HOST_WIDE_INT_MAX
+ && warn_alloc_size_limit < warn_vla_limit)
+ || (!alloca_for_var
+ && warn_alloca_limit >= HOST_WIDE_INT_MAX
+ && warn_alloc_size_limit < warn_alloca_limit
+ ))
+ {
+ /* -Walloca-larger-than and -Wvla-larger-than settings of
+ less than HOST_WIDE_INT_MAX override the more general
+ -Walloc-size-larger-than so unless either of the former
+ options is smaller than the last one (which would imply
+ that the call was already checked), check the alloca
+ arguments for overflow.
*/ + tree args[] = { CALL_EXPR_ARG (exp, 0), NULL_TREE }; + int idx[] = { 0, -1 }; + maybe_warn_alloc_args_overflow (fndecl, exp, args, idx); + } - if (label) - { - emit_label (label); + /* Compute the argument. */ + op0 = expand_normal (CALL_EXPR_ARG (exp, 0)); - /* Given conditional stores, the offset can no longer be - known, so clear it. */ - clear_mem_offset (to); - } - } + /* Compute the alignment. */ + align = (fcode == BUILT_IN_ALLOCA + ? BIGGEST_ALIGNMENT + : TREE_INT_CST_LOW (CALL_EXPR_ARG (exp, 1))); - return true; + /* Compute the maximum size. */ + max_size = (fcode == BUILT_IN_ALLOCA_WITH_ALIGN_AND_MAX + ? TREE_INT_CST_LOW (CALL_EXPR_ARG (exp, 2)) + : -1); + + /* Allocate the desired space. If the allocation stems from the declaration + of a variable-sized object, it cannot accumulate. */ + result + = allocate_dynamic_stack_space (op0, 0, align, max_size, alloca_for_var); + result = convert_memory_address (ptr_mode, result); + + /* Dynamic allocations for variables are recorded during gimplification. */ + if (!alloca_for_var && (flag_callgraph_info & CALLGRAPH_INFO_DYNAMIC_ALLOC)) + record_dynamic_alloc (exp); + + return result; } -/* Helper function to do the actual work for expand_builtin_memset. The - arguments to the builtin_memset call DEST, VAL, and LEN are broken out - so that this can also be called without constructing an actual CALL_EXPR. - The other arguments and return value are the same as for - expand_builtin_memset. */ +/* Emit a call to __asan_allocas_unpoison call in EXP. Add to second argument + of the call virtual_stack_dynamic_rtx - stack_pointer_rtx, which is the + STACK_DYNAMIC_OFFSET value. See motivation for this in comment to + handle_builtin_stack_restore function. */ static rtx -expand_builtin_memset_args (tree dest, tree val, tree len, - rtx target, machine_mode mode, tree orig_exp) +expand_asan_emit_allocas_unpoison (tree exp) { - tree fndecl, fn; - enum built_in_function fcode; - machine_mode val_mode; - char c; - unsigned int dest_align; - rtx dest_mem, dest_addr, len_rtx; - HOST_WIDE_INT expected_size = -1; - unsigned int expected_align = 0; - unsigned HOST_WIDE_INT min_size; - unsigned HOST_WIDE_INT max_size; - unsigned HOST_WIDE_INT probable_max_size; + tree arg0 = CALL_EXPR_ARG (exp, 0); + tree arg1 = CALL_EXPR_ARG (exp, 1); + rtx top = expand_expr (arg0, NULL_RTX, ptr_mode, EXPAND_NORMAL); + rtx bot = expand_expr (arg1, NULL_RTX, ptr_mode, EXPAND_NORMAL); + rtx off = expand_simple_binop (Pmode, MINUS, virtual_stack_dynamic_rtx, + stack_pointer_rtx, NULL_RTX, 0, + OPTAB_LIB_WIDEN); + off = convert_modes (ptr_mode, Pmode, off, 0); + bot = expand_simple_binop (ptr_mode, PLUS, bot, off, NULL_RTX, 0, + OPTAB_LIB_WIDEN); + rtx ret = init_one_libfunc ("__asan_allocas_unpoison"); + ret = emit_library_call_value (ret, NULL_RTX, LCT_NORMAL, ptr_mode, + top, ptr_mode, bot, ptr_mode); + return ret; +} - dest_align = get_pointer_alignment (dest); +/* Expand a call to bswap builtin in EXP. + Return NULL_RTX if a normal call should be emitted rather than expanding the + function in-line. If convenient, the result should be placed in TARGET. + SUBTARGET may be used as the target for computing one of EXP's operands. */ - /* If DEST is not a pointer type, don't do this operation in-line. 
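
The three argument shapes validated and unpacked above map onto these user-level builtins; note that __builtin_alloca_with_align takes its alignment in bits:

    void
    scratch (unsigned n)
    {
      char *p = __builtin_alloca (n);
      char *q = __builtin_alloca_with_align (n, 128);  /* 128 bits = 16 bytes */
      p[0] = q[0] = 0;
    }
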
*/ - if (dest_align == 0) +static rtx +expand_builtin_bswap (machine_mode target_mode, tree exp, rtx target, + rtx subtarget) +{ + tree arg; + rtx op0; + + if (!validate_arglist (exp, INTEGER_TYPE, VOID_TYPE)) return NULL_RTX; - if (currently_expanding_gimple_stmt) - stringop_block_profile (currently_expanding_gimple_stmt, - &expected_align, &expected_size); + arg = CALL_EXPR_ARG (exp, 0); + op0 = expand_expr (arg, + subtarget && GET_MODE (subtarget) == target_mode + ? subtarget : NULL_RTX, + target_mode, EXPAND_NORMAL); + if (GET_MODE (op0) != target_mode) + op0 = convert_to_mode (target_mode, op0, 1); - if (expected_align < dest_align) - expected_align = dest_align; + target = expand_unop (target_mode, bswap_optab, op0, target, 1); - /* If the LEN parameter is zero, return DEST. */ - if (integer_zerop (len)) - { - /* Evaluate and ignore VAL in case it has side-effects. */ - expand_expr (val, const0_rtx, VOIDmode, EXPAND_NORMAL); - return expand_expr (dest, target, mode, EXPAND_NORMAL); - } + gcc_assert (target); - /* Stabilize the arguments in case we fail. */ - dest = builtin_save_expr (dest); - val = builtin_save_expr (val); - len = builtin_save_expr (len); + return convert_to_mode (target_mode, target, 1); +} - len_rtx = expand_normal (len); - determine_block_size (len, len_rtx, &min_size, &max_size, - &probable_max_size); - dest_mem = get_memory_rtx (dest, len); - val_mode = TYPE_MODE (unsigned_char_type_node); +/* Expand a call to a unary builtin in EXP. + Return NULL_RTX if a normal call should be emitted rather than expanding the + function in-line. If convenient, the result should be placed in TARGET. + SUBTARGET may be used as the target for computing one of EXP's operands. */ - if (TREE_CODE (val) != INTEGER_CST - || target_char_cast (val, &c)) - { - rtx val_rtx; +static rtx +expand_builtin_unop (machine_mode target_mode, tree exp, rtx target, + rtx subtarget, optab op_optab) +{ + rtx op0; - val_rtx = expand_normal (val); - val_rtx = convert_to_mode (val_mode, val_rtx, 0); + if (!validate_arglist (exp, INTEGER_TYPE, VOID_TYPE)) + return NULL_RTX; - /* Assume that we can memset by pieces if we can store - * the coefficients by pieces (in the required modes). - * We can't pass builtin_memset_gen_str as that emits RTL. */ - c = 1; - if (tree_fits_uhwi_p (len) - && can_store_by_pieces (tree_to_uhwi (len), - builtin_memset_read_str, &c, dest_align, - true)) - { - val_rtx = force_reg (val_mode, val_rtx); - store_by_pieces (dest_mem, tree_to_uhwi (len), - builtin_memset_gen_str, val_rtx, dest_align, - true, RETURN_BEGIN); - } - else if (!set_storage_via_setmem (dest_mem, len_rtx, val_rtx, - dest_align, expected_align, - expected_size, min_size, max_size, - probable_max_size) - && !try_store_by_multiple_pieces (dest_mem, len_rtx, - tree_ctz (len), - min_size, max_size, - val_rtx, 0, - dest_align)) - goto do_libcall; + /* Compute the argument. */ + op0 = expand_expr (CALL_EXPR_ARG (exp, 0), + (subtarget + && (TYPE_MODE (TREE_TYPE (CALL_EXPR_ARG (exp, 0))) + == GET_MODE (subtarget))) ? subtarget : NULL_RTX, + VOIDmode, EXPAND_NORMAL); + /* Compute op, into TARGET if possible. + Set TARGET to wherever the result comes back. 
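
User-level forms of the two expanders above: __builtin_bswap32 goes through bswap_optab, while __builtin_popcount takes the generic unary-optab path:

    unsigned int
    swap32 (unsigned int x)
    {
      return __builtin_bswap32 (x);
    }

    int
    bits_set (unsigned int x)
    {
      return __builtin_popcount (x);
    }
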
*/ + target = expand_unop (TYPE_MODE (TREE_TYPE (CALL_EXPR_ARG (exp, 0))), + op_optab, op0, target, op_optab != clrsb_optab); + gcc_assert (target); - dest_mem = force_operand (XEXP (dest_mem, 0), NULL_RTX); - dest_mem = convert_memory_address (ptr_mode, dest_mem); - return dest_mem; - } + return convert_to_mode (target_mode, target, 0); +} - if (c) - { - if (tree_fits_uhwi_p (len) - && can_store_by_pieces (tree_to_uhwi (len), - builtin_memset_read_str, &c, dest_align, - true)) - store_by_pieces (dest_mem, tree_to_uhwi (len), - builtin_memset_read_str, &c, dest_align, true, - RETURN_BEGIN); - else if (!set_storage_via_setmem (dest_mem, len_rtx, - gen_int_mode (c, val_mode), - dest_align, expected_align, - expected_size, min_size, max_size, - probable_max_size) - && !try_store_by_multiple_pieces (dest_mem, len_rtx, - tree_ctz (len), - min_size, max_size, - NULL_RTX, c, - dest_align)) - goto do_libcall; +/* Expand a call to __builtin_expect. We just return our argument + as the builtin_expect semantic should've been already executed by + tree branch prediction pass. */ - dest_mem = force_operand (XEXP (dest_mem, 0), NULL_RTX); - dest_mem = convert_memory_address (ptr_mode, dest_mem); - return dest_mem; - } +static rtx +expand_builtin_expect (tree exp, rtx target) +{ + tree arg; - set_mem_align (dest_mem, dest_align); - dest_addr = clear_storage_hints (dest_mem, len_rtx, - CALL_EXPR_TAILCALL (orig_exp) - ? BLOCK_OP_TAILCALL : BLOCK_OP_NORMAL, - expected_align, expected_size, - min_size, max_size, - probable_max_size, tree_ctz (len)); + if (call_expr_nargs (exp) < 2) + return const0_rtx; + arg = CALL_EXPR_ARG (exp, 0); - if (dest_addr == 0) - { - dest_addr = force_operand (XEXP (dest_mem, 0), NULL_RTX); - dest_addr = convert_memory_address (ptr_mode, dest_addr); - } + target = expand_expr (arg, target, VOIDmode, EXPAND_NORMAL); + /* When guessing was done, the hints should be already stripped away. */ + gcc_assert (!flag_guess_branch_prob + || optimize == 0 || seen_error ()); + return target; +} - return dest_addr; +/* Expand a call to __builtin_expect_with_probability. We just return our + argument as the builtin_expect semantic should've been already executed by + tree branch prediction pass. */ - do_libcall: - fndecl = get_callee_fndecl (orig_exp); - fcode = DECL_FUNCTION_CODE (fndecl); - if (fcode == BUILT_IN_MEMSET) - fn = build_call_nofold_loc (EXPR_LOCATION (orig_exp), fndecl, 3, - dest, val, len); - else if (fcode == BUILT_IN_BZERO) - fn = build_call_nofold_loc (EXPR_LOCATION (orig_exp), fndecl, 2, - dest, len); - else - gcc_unreachable (); - gcc_assert (TREE_CODE (fn) == CALL_EXPR); - CALL_EXPR_TAILCALL (fn) = CALL_EXPR_TAILCALL (orig_exp); - return expand_call (fn, target, target == const0_rtx); +static rtx +expand_builtin_expect_with_probability (tree exp, rtx target) +{ + tree arg; + + if (call_expr_nargs (exp) < 3) + return const0_rtx; + arg = CALL_EXPR_ARG (exp, 0); + + target = expand_expr (arg, target, VOIDmode, EXPAND_NORMAL); + /* When guessing was done, the hints should be already stripped away. */ + gcc_assert (!flag_guess_branch_prob + || optimize == 0 || seen_error ()); + return target; } -/* Expand expression EXP, which is a call to the bzero builtin. Return - NULL_RTX if we failed the caller should emit a normal call. */ + +/* Expand a call to __builtin_assume_aligned. We just return our first + argument as the builtin_assume_aligned semantic should've been already + executed by CCP. 
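
By expansion time the tree-level branch prediction passes have already consumed the hint, so the expander only forwards the value. At the source level (cold_path is a hypothetical out-of-line helper):

    extern int cold_path (int);   /* hypothetical, assumed rarely taken */

    int
    f (int x)
    {
      if (__builtin_expect (x != 0, 0))  /* x != 0 expected false */
        return cold_path (x);
      return 0;
    }
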
*/ static rtx -expand_builtin_bzero (tree exp) +expand_builtin_assume_aligned (tree exp, rtx target) { - if (!validate_arglist (exp, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) - return NULL_RTX; - - tree dest = CALL_EXPR_ARG (exp, 0); - tree size = CALL_EXPR_ARG (exp, 1); + if (call_expr_nargs (exp) < 2) + return const0_rtx; + target = expand_expr (CALL_EXPR_ARG (exp, 0), target, VOIDmode, + EXPAND_NORMAL); + gcc_assert (!TREE_SIDE_EFFECTS (CALL_EXPR_ARG (exp, 1)) + && (call_expr_nargs (exp) < 3 + || !TREE_SIDE_EFFECTS (CALL_EXPR_ARG (exp, 2)))); + return target; +} - check_memop_access (exp, dest, NULL_TREE, size); +void +expand_builtin_trap (void) +{ + if (targetm.have_trap ()) + { + rtx_insn *insn = emit_insn (targetm.gen_trap ()); + /* For trap insns when not accumulating outgoing args force + REG_ARGS_SIZE note to prevent crossjumping of calls with + different args sizes. */ + if (!ACCUMULATE_OUTGOING_ARGS) + add_args_size_note (insn, stack_pointer_delta); + } + else + { + tree fn = builtin_decl_implicit (BUILT_IN_ABORT); + tree call_expr = build_call_expr (fn, 0); + expand_call (call_expr, NULL_RTX, false); + } - /* New argument list transforming bzero(ptr x, int y) to - memset(ptr x, int 0, size_t y). This is done this way - so that if it isn't expanded inline, we fallback to - calling bzero instead of memset. */ + emit_barrier (); +} - location_t loc = EXPR_LOCATION (exp); +/* Expand a call to __builtin_unreachable. We do nothing except emit + a barrier saying that control flow will not pass here. - return expand_builtin_memset_args (dest, integer_zero_node, - fold_convert_loc (loc, - size_type_node, size), - const0_rtx, VOIDmode, exp); + It is the responsibility of the program being compiled to ensure + that control flow does never reach __builtin_unreachable. */ +static void +expand_builtin_unreachable (void) +{ + emit_barrier (); } -/* Try to expand cmpstr operation ICODE with the given operands. - Return the result rtx on success, otherwise return null. */ +/* Expand EXP, a call to fabs, fabsf or fabsl. + Return NULL_RTX if a normal call should be emitted rather than expanding + the function inline. If convenient, the result should be placed + in TARGET. SUBTARGET may be used as the target for computing + the operand. */ static rtx -expand_cmpstr (insn_code icode, rtx target, rtx arg1_rtx, rtx arg2_rtx, - HOST_WIDE_INT align) +expand_builtin_fabs (tree exp, rtx target, rtx subtarget) { - machine_mode insn_mode = insn_data[icode].operand[0].mode; + machine_mode mode; + tree arg; + rtx op0; - if (target && (!REG_P (target) || HARD_REGISTER_P (target))) - target = NULL_RTX; + if (!validate_arglist (exp, REAL_TYPE, VOID_TYPE)) + return NULL_RTX; - class expand_operand ops[4]; - create_output_operand (&ops[0], target, insn_mode); - create_fixed_operand (&ops[1], arg1_rtx); - create_fixed_operand (&ops[2], arg2_rtx); - create_integer_operand (&ops[3], align); - if (maybe_expand_insn (icode, 4, ops)) - return ops[0].value; - return NULL_RTX; + arg = CALL_EXPR_ARG (exp, 0); + CALL_EXPR_ARG (exp, 0) = arg = builtin_save_expr (arg); + mode = TYPE_MODE (TREE_TYPE (arg)); + op0 = expand_expr (arg, subtarget, VOIDmode, EXPAND_NORMAL); + return expand_abs (mode, op0, target, 0, safe_from_p (target, arg, 1)); } -/* Expand expression EXP, which is a call to the memcmp built-in function. - Return NULL_RTX if we failed and the caller should emit a normal call, - otherwise try to get the result in TARGET, if convenient. 
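
The expander above only asserts that the extra arguments are free of side effects, because CCP has already folded the alignment knowledge into uses of the pointer; a typical call:

    void *
    assume16 (void *p)
    {
      return __builtin_assume_aligned (p, 16);
    }
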
- RESULT_EQ is true if we can relax the returned value to be either zero
- or nonzero, without caring about the sign. */
+/* Expand EXP, a call to copysign, copysignf, or copysignl.
+ Return NULL if a normal call should be emitted rather than expanding the
+ function inline. If convenient, the result should be placed in TARGET.
+ SUBTARGET may be used as the target for computing the operand. */
static rtx
-expand_builtin_memcmp (tree exp, rtx target, bool result_eq)
+expand_builtin_copysign (tree exp, rtx target, rtx subtarget)
{
- if (!validate_arglist (exp,
- POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE))
+ rtx op0, op1;
+ tree arg;
+
+ if (!validate_arglist (exp, REAL_TYPE, REAL_TYPE, VOID_TYPE))
return NULL_RTX;
- tree arg1 = CALL_EXPR_ARG (exp, 0);
- tree arg2 = CALL_EXPR_ARG (exp, 1);
- tree len = CALL_EXPR_ARG (exp, 2);
+ arg = CALL_EXPR_ARG (exp, 0);
+ op0 = expand_expr (arg, subtarget, VOIDmode, EXPAND_NORMAL);
- /* Diagnose calls where the specified length exceeds the size of either
- object. */
- if (!check_read_access (exp, arg1, len, 0)
- || !check_read_access (exp, arg2, len, 0))
- return NULL_RTX;
+ arg = CALL_EXPR_ARG (exp, 1);
+ op1 = expand_normal (arg);
- /* Due to the performance benefit, always inline the calls first
- when result_eq is false. */
- rtx result = NULL_RTX;
- enum built_in_function fcode = DECL_FUNCTION_CODE (get_callee_fndecl (exp));
- if (!result_eq && fcode != BUILT_IN_BCMP)
- {
- result = inline_expand_builtin_bytecmp (exp, target);
- if (result)
- return result;
- }
+ return expand_copysign (op0, op1, target);
+}
- machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
- location_t loc = EXPR_LOCATION (exp);
+/* Emit a call to __builtin___clear_cache. */
- unsigned int arg1_align = get_pointer_alignment (arg1) / BITS_PER_UNIT;
- unsigned int arg2_align = get_pointer_alignment (arg2) / BITS_PER_UNIT;
+void
+default_emit_call_builtin___clear_cache (rtx begin, rtx end)
+{
+ rtx callee = gen_rtx_SYMBOL_REF (Pmode,
+ BUILTIN_ASM_NAME_PTR
+ (BUILT_IN_CLEAR_CACHE));
- /* If we don't have POINTER_TYPE, call the function. */
- if (arg1_align == 0 || arg2_align == 0)
- return NULL_RTX;
+ emit_library_call (callee,
+ LCT_NORMAL, VOIDmode,
+ convert_memory_address (ptr_mode, begin), ptr_mode,
+ convert_memory_address (ptr_mode, end), ptr_mode);
+}
- rtx arg1_rtx = get_memory_rtx (arg1, len);
- rtx arg2_rtx = get_memory_rtx (arg2, len);
- rtx len_rtx = expand_normal (fold_convert_loc (loc, sizetype, len));
+/* Emit a call to __builtin___clear_cache, unless the target specifies
+ it as do-nothing. This function can be used by trampoline
+ finalizers to duplicate the effects of expanding a call to the
+ clear_cache builtin. */
- /* Set MEM_SIZE as appropriate. */
- if (CONST_INT_P (len_rtx))
+void
+maybe_emit_call_builtin___clear_cache (rtx begin, rtx end)
+{
+ if ((GET_MODE (begin) != ptr_mode && GET_MODE (begin) != Pmode)
+ || (GET_MODE (end) != ptr_mode && GET_MODE (end) != Pmode))
{
- set_mem_size (arg1_rtx, INTVAL (len_rtx));
- set_mem_size (arg2_rtx, INTVAL (len_rtx));
+ error ("both arguments to %<__builtin___clear_cache%> must be pointers");
+ return;
}
- by_pieces_constfn constfn = NULL;
+ if (targetm.have_clear_cache ())
+ {
+ /* We have a "clear_cache" insn, and it will handle everything. */
+ class expand_operand ops[2];
- /* Try to get the byte representation of the constant ARG2 (or, only
- when the function's result is used for equality to zero, ARG1)
- points to, with its byte size in NBYTES.
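
A minimal call that reaches the emit path above, for example after writing JIT-generated code into a buffer:

    void
    flush_code (char *begin, char *end)
    {
      __builtin___clear_cache (begin, end);
    }
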
*/ - unsigned HOST_WIDE_INT nbytes; - const char *rep = getbyterep (arg2, &nbytes); - if (result_eq && rep == NULL) + create_address_operand (&ops[0], begin); + create_address_operand (&ops[1], end); + + if (maybe_expand_insn (targetm.code_for_clear_cache, 2, ops)) + return; + } + else { - /* For equality to zero the arguments are interchangeable. */ - rep = getbyterep (arg1, &nbytes); - if (rep != NULL) - std::swap (arg1_rtx, arg2_rtx); +#ifndef CLEAR_INSN_CACHE + /* There is no "clear_cache" insn, and __clear_cache() in libgcc + does nothing. There is no need to call it. Do nothing. */ + return; +#endif /* CLEAR_INSN_CACHE */ } - /* If the function's constant bound LEN_RTX is less than or equal - to the byte size of the representation of the constant argument, - and if block move would be done by pieces, we can avoid loading - the bytes from memory and only store the computed constant result. */ - if (rep - && CONST_INT_P (len_rtx) - && (unsigned HOST_WIDE_INT) INTVAL (len_rtx) <= nbytes) - constfn = builtin_memcpy_read_str; + targetm.calls.emit_call_builtin___clear_cache (begin, end); +} - result = emit_block_cmp_hints (arg1_rtx, arg2_rtx, len_rtx, - TREE_TYPE (len), target, - result_eq, constfn, - CONST_CAST (char *, rep)); +/* Expand a call to __builtin___clear_cache. */ - if (result) +static void +expand_builtin___clear_cache (tree exp) +{ + tree begin, end; + rtx begin_rtx, end_rtx; + + /* We must not expand to a library call. If we did, any + fallback library function in libgcc that might contain a call to + __builtin___clear_cache() would recurse infinitely. */ + if (!validate_arglist (exp, POINTER_TYPE, POINTER_TYPE, VOID_TYPE)) { - /* Return the value in the proper mode for this function. */ - if (GET_MODE (result) == mode) - return result; + error ("both arguments to %<__builtin___clear_cache%> must be pointers"); + return; + } - if (target != 0) - { - convert_move (target, result, 0); - return target; - } + begin = CALL_EXPR_ARG (exp, 0); + begin_rtx = expand_expr (begin, NULL_RTX, Pmode, EXPAND_NORMAL); - return convert_to_mode (mode, result, 0); - } + end = CALL_EXPR_ARG (exp, 1); + end_rtx = expand_expr (end, NULL_RTX, Pmode, EXPAND_NORMAL); - return NULL_RTX; + maybe_emit_call_builtin___clear_cache (begin_rtx, end_rtx); } -/* Expand expression EXP, which is a call to the strcmp builtin. Return NULL_RTX - if we failed the caller should emit a normal call, otherwise try to get - the result in TARGET, if convenient. */ +/* Given a trampoline address, make sure it satisfies TRAMPOLINE_ALIGNMENT. */ static rtx -expand_builtin_strcmp (tree exp, ATTRIBUTE_UNUSED rtx target) +round_trampoline_addr (rtx tramp) { - if (!validate_arglist (exp, POINTER_TYPE, POINTER_TYPE, VOID_TYPE)) - return NULL_RTX; + rtx temp, addend, mask; - tree arg1 = CALL_EXPR_ARG (exp, 0); - tree arg2 = CALL_EXPR_ARG (exp, 1); + /* If we don't need too much alignment, we'll have been guaranteed + proper alignment by get_trampoline_type. */ + if (TRAMPOLINE_ALIGNMENT <= STACK_BOUNDARY) + return tramp; - if (!check_read_access (exp, arg1) - || !check_read_access (exp, arg2)) - return NULL_RTX; + /* Round address up to desired boundary. */ + temp = gen_reg_rtx (Pmode); + addend = gen_int_mode (TRAMPOLINE_ALIGNMENT / BITS_PER_UNIT - 1, Pmode); + mask = gen_int_mode (-TRAMPOLINE_ALIGNMENT / BITS_PER_UNIT, Pmode); - /* Due to the performance benefit, always inline the calls first. 
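
The add-then-mask rounding used by round_trampoline_addr, restated in plain C under the assumption that align is a power of two:

    static unsigned long
    round_up (unsigned long addr, unsigned long align)
    {
      return (addr + align - 1) & ~(align - 1);  /* same mask as -align */
    }
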
*/ - rtx result = NULL_RTX; - result = inline_expand_builtin_bytecmp (exp, target); - if (result) - return result; + temp = expand_simple_binop (Pmode, PLUS, tramp, addend, + temp, 0, OPTAB_LIB_WIDEN); + tramp = expand_simple_binop (Pmode, AND, temp, mask, + temp, 0, OPTAB_LIB_WIDEN); - insn_code cmpstr_icode = direct_optab_handler (cmpstr_optab, SImode); - insn_code cmpstrn_icode = direct_optab_handler (cmpstrn_optab, SImode); - if (cmpstr_icode == CODE_FOR_nothing && cmpstrn_icode == CODE_FOR_nothing) - return NULL_RTX; + return tramp; +} - unsigned int arg1_align = get_pointer_alignment (arg1) / BITS_PER_UNIT; - unsigned int arg2_align = get_pointer_alignment (arg2) / BITS_PER_UNIT; +static rtx +expand_builtin_init_trampoline (tree exp, bool onstack) +{ + tree t_tramp, t_func, t_chain; + rtx m_tramp, r_tramp, r_chain, tmp; - /* If we don't have POINTER_TYPE, call the function. */ - if (arg1_align == 0 || arg2_align == 0) + if (!validate_arglist (exp, POINTER_TYPE, POINTER_TYPE, + POINTER_TYPE, VOID_TYPE)) return NULL_RTX; - /* Stabilize the arguments in case gen_cmpstr(n)si fail. */ - arg1 = builtin_save_expr (arg1); - arg2 = builtin_save_expr (arg2); + t_tramp = CALL_EXPR_ARG (exp, 0); + t_func = CALL_EXPR_ARG (exp, 1); + t_chain = CALL_EXPR_ARG (exp, 2); - rtx arg1_rtx = get_memory_rtx (arg1, NULL); - rtx arg2_rtx = get_memory_rtx (arg2, NULL); + r_tramp = expand_normal (t_tramp); + m_tramp = gen_rtx_MEM (BLKmode, r_tramp); + MEM_NOTRAP_P (m_tramp) = 1; - /* Try to call cmpstrsi. */ - if (cmpstr_icode != CODE_FOR_nothing) - result = expand_cmpstr (cmpstr_icode, target, arg1_rtx, arg2_rtx, - MIN (arg1_align, arg2_align)); + /* If ONSTACK, the TRAMP argument should be the address of a field + within the local function's FRAME decl. Either way, let's see if + we can fill in the MEM_ATTRs for this memory. */ + if (TREE_CODE (t_tramp) == ADDR_EXPR) + set_mem_attributes (m_tramp, TREE_OPERAND (t_tramp, 0), true); - /* Try to determine at least one length and call cmpstrnsi. */ - if (!result && cmpstrn_icode != CODE_FOR_nothing) + /* Creator of a heap trampoline is responsible for making sure the + address is aligned to at least STACK_BOUNDARY. Normally malloc + will ensure this anyhow. */ + tmp = round_trampoline_addr (r_tramp); + if (tmp != r_tramp) { - tree len; - rtx arg3_rtx; - - tree len1 = c_strlen (arg1, 1); - tree len2 = c_strlen (arg2, 1); - - if (len1) - len1 = size_binop (PLUS_EXPR, ssize_int (1), len1); - if (len2) - len2 = size_binop (PLUS_EXPR, ssize_int (1), len2); + m_tramp = change_address (m_tramp, BLKmode, tmp); + set_mem_align (m_tramp, TRAMPOLINE_ALIGNMENT); + set_mem_size (m_tramp, TRAMPOLINE_SIZE); + } - /* If we don't have a constant length for the first, use the length - of the second, if we know it. We don't require a constant for - this case; some cost analysis could be done if both are available - but neither is constant. For now, assume they're equally cheap, - unless one has side effects. If both strings have constant lengths, - use the smaller. */ + /* The FUNC argument should be the address of the nested function. + Extract the actual function decl to pass to the hook. 
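
The PLUS/AND pair just above is the usual add-then-mask round-up. The same identity in stand-alone C, with round_up an invented name and the alignment assumed to be a power of two:

  #include <assert.h>
  #include <stdint.h>

  static uintptr_t round_up (uintptr_t p, uintptr_t align)
  {
    /* addend = align - 1, mask = -align, exactly as computed above.  */
    return (p + align - 1) & -align;
  }

  int main (void)
  {
    assert (round_up (13, 8) == 16);
    assert (round_up (16, 8) == 16);   /* already aligned: unchanged */
    return 0;
  }
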
*/ + gcc_assert (TREE_CODE (t_func) == ADDR_EXPR); + t_func = TREE_OPERAND (t_func, 0); + gcc_assert (TREE_CODE (t_func) == FUNCTION_DECL); - if (!len1) - len = len2; - else if (!len2) - len = len1; - else if (TREE_SIDE_EFFECTS (len1)) - len = len2; - else if (TREE_SIDE_EFFECTS (len2)) - len = len1; - else if (TREE_CODE (len1) != INTEGER_CST) - len = len2; - else if (TREE_CODE (len2) != INTEGER_CST) - len = len1; - else if (tree_int_cst_lt (len1, len2)) - len = len1; - else - len = len2; + r_chain = expand_normal (t_chain); - /* If both arguments have side effects, we cannot optimize. */ - if (len && !TREE_SIDE_EFFECTS (len)) - { - arg3_rtx = expand_normal (len); - result = expand_cmpstrn_or_cmpmem - (cmpstrn_icode, target, arg1_rtx, arg2_rtx, TREE_TYPE (len), - arg3_rtx, MIN (arg1_align, arg2_align)); - } - } + /* Generate insns to initialize the trampoline. */ + targetm.calls.trampoline_init (m_tramp, t_func, r_chain); - tree fndecl = get_callee_fndecl (exp); - if (result) + if (onstack) { - /* Check to see if the argument was declared attribute nonstring - and if so, issue a warning since at this point it's not known - to be nul-terminated. */ - maybe_warn_nonstring_arg (fndecl, exp); + trampolines_created = 1; - /* Return the value in the proper mode for this function. */ - machine_mode mode = TYPE_MODE (TREE_TYPE (exp)); - if (GET_MODE (result) == mode) - return result; - if (target == 0) - return convert_to_mode (mode, result, 0); - convert_move (target, result, 0); - return target; + if (targetm.calls.custom_function_descriptors != 0) + warning_at (DECL_SOURCE_LOCATION (t_func), OPT_Wtrampolines, + "trampoline generated for nested function %qD", t_func); } - /* Expand the library call ourselves using a stabilized argument - list to avoid re-evaluating the function's arguments twice. */ - tree fn = build_call_nofold_loc (EXPR_LOCATION (exp), fndecl, 2, arg1, arg2); - gcc_assert (TREE_CODE (fn) == CALL_EXPR); - CALL_EXPR_TAILCALL (fn) = CALL_EXPR_TAILCALL (exp); - return expand_call (fn, target, target == const0_rtx); + return const0_rtx; } -/* Expand expression EXP, which is a call to the strncmp builtin. Return - NULL_RTX if we failed the caller should emit a normal call, otherwise - try to get the result in TARGET, if convenient. */ - static rtx -expand_builtin_strncmp (tree exp, ATTRIBUTE_UNUSED rtx target, - ATTRIBUTE_UNUSED machine_mode mode) +expand_builtin_adjust_trampoline (tree exp) { - if (!validate_arglist (exp, - POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) - return NULL_RTX; - - tree arg1 = CALL_EXPR_ARG (exp, 0); - tree arg2 = CALL_EXPR_ARG (exp, 1); - tree arg3 = CALL_EXPR_ARG (exp, 2); + rtx tramp; - if (!check_nul_terminated_array (exp, arg1, arg3) - || !check_nul_terminated_array (exp, arg2, arg3)) + if (!validate_arglist (exp, POINTER_TYPE, VOID_TYPE)) return NULL_RTX; - location_t loc = EXPR_LOCATION (exp); - tree len1 = c_strlen (arg1, 1); - tree len2 = c_strlen (arg2, 1); - - if (!len1 || !len2) - { - /* Check to see if the argument was declared attribute nonstring - and if so, issue a warning since at this point it's not known - to be nul-terminated. */ - if (!maybe_warn_nonstring_arg (get_callee_fndecl (exp), exp) - && !len1 && !len2) - { - /* A strncmp read is constrained not just by the bound but - also by the length of the shorter string. Specifying - a bound that's larger than the size of either array makes - no sense and is likely a bug. 
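
For context, the -Wtrampolines warning above fires, on targets that take this path, for GNU C code like the following sketch (apply, outer and add_k are invented names): taking the address of a nested function that refers to its enclosing frame forces an on-stack trampoline.

  int apply (int (*fn) (int), int x)
  {
    return fn (x);
  }

  int outer (int k)
  {
    int add_k (int v) { return v + k; }   /* GNU C nested function */
    return apply (add_k, 1);              /* its address forces a trampoline */
  }
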
When the length of neither - of the two strings is known but the sizes of both of - the arrays they are stored in is, issue a warning if - the bound is larger than than the size of the larger - of the two arrays. */ + tramp = expand_normal (CALL_EXPR_ARG (exp, 0)); + tramp = round_trampoline_addr (tramp); + if (targetm.calls.trampoline_adjust_address) + tramp = targetm.calls.trampoline_adjust_address (tramp); - access_ref ref1 (arg3, true); - access_ref ref2 (arg3, true); + return tramp; +} - tree bndrng[2] = { NULL_TREE, NULL_TREE }; - get_size_range (arg3, bndrng, ref1.bndrng); +/* Expand a call to the builtin descriptor initialization routine. + A descriptor is made up of a couple of pointers to the static + chain and the code entry in this order. */ - tree size1 = compute_objsize (arg1, 1, &ref1); - tree size2 = compute_objsize (arg2, 1, &ref2); - tree func = get_callee_fndecl (exp); +static rtx +expand_builtin_init_descriptor (tree exp) +{ + tree t_descr, t_func, t_chain; + rtx m_descr, r_descr, r_func, r_chain; - if (size1 && size2 && bndrng[0] && !integer_zerop (bndrng[0])) - { - offset_int rem1 = ref1.size_remaining (); - offset_int rem2 = ref2.size_remaining (); - if (rem1 == 0 || rem2 == 0) - maybe_warn_for_bound (OPT_Wstringop_overread, loc, exp, func, - bndrng, integer_zero_node); - else - { - offset_int maxrem = wi::max (rem1, rem2, UNSIGNED); - if (maxrem < wi::to_offset (bndrng[0])) - maybe_warn_for_bound (OPT_Wstringop_overread, loc, exp, - func, bndrng, - wide_int_to_tree (sizetype, maxrem)); - } - } - else if (bndrng[0] - && !integer_zerop (bndrng[0]) - && ((size1 && integer_zerop (size1)) - || (size2 && integer_zerop (size2)))) - maybe_warn_for_bound (OPT_Wstringop_overread, loc, exp, func, - bndrng, integer_zero_node); - } - } - - /* Due to the performance benefit, always inline the calls first. */ - rtx result = NULL_RTX; - result = inline_expand_builtin_bytecmp (exp, target); - if (result) - return result; - - /* If c_strlen can determine an expression for one of the string - lengths, and it doesn't have side effects, then emit cmpstrnsi - using length MIN(strlen(string)+1, arg3). */ - insn_code cmpstrn_icode = direct_optab_handler (cmpstrn_optab, SImode); - if (cmpstrn_icode == CODE_FOR_nothing) + if (!validate_arglist (exp, POINTER_TYPE, POINTER_TYPE, POINTER_TYPE, + VOID_TYPE)) return NULL_RTX; - tree len; - - unsigned int arg1_align = get_pointer_alignment (arg1) / BITS_PER_UNIT; - unsigned int arg2_align = get_pointer_alignment (arg2) / BITS_PER_UNIT; - - if (len1) - len1 = size_binop_loc (loc, PLUS_EXPR, ssize_int (1), len1); - if (len2) - len2 = size_binop_loc (loc, PLUS_EXPR, ssize_int (1), len2); - - tree len3 = fold_convert_loc (loc, sizetype, arg3); - - /* If we don't have a constant length for the first, use the length - of the second, if we know it. If neither string is constant length, - use the given length argument. We don't require a constant for - this case; some cost analysis could be done if both are available - but neither is constant. For now, assume they're equally cheap, - unless one has side effects. If both strings have constant lengths, - use the smaller. 
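
A small instance of what the diagnostic above is after; assuming the logic just described, the bound exceeds the size of the larger of the two arrays, so GCC can warn under -Wstringop-overread:

  #include <string.h>

  char a[4], b[8];

  int cmp (void)
  {
    return strncmp (a, b, 16);   /* bound 16 > sizeof a and sizeof b */
  }
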
*/ + t_descr = CALL_EXPR_ARG (exp, 0); + t_func = CALL_EXPR_ARG (exp, 1); + t_chain = CALL_EXPR_ARG (exp, 2); - if (!len1 && !len2) - len = len3; - else if (!len1) - len = len2; - else if (!len2) - len = len1; - else if (TREE_SIDE_EFFECTS (len1)) - len = len2; - else if (TREE_SIDE_EFFECTS (len2)) - len = len1; - else if (TREE_CODE (len1) != INTEGER_CST) - len = len2; - else if (TREE_CODE (len2) != INTEGER_CST) - len = len1; - else if (tree_int_cst_lt (len1, len2)) - len = len1; - else - len = len2; + r_descr = expand_normal (t_descr); + m_descr = gen_rtx_MEM (BLKmode, r_descr); + MEM_NOTRAP_P (m_descr) = 1; + set_mem_align (m_descr, GET_MODE_ALIGNMENT (ptr_mode)); - /* If we are not using the given length, we must incorporate it here. - The actual new length parameter will be MIN(len,arg3) in this case. */ - if (len != len3) - { - len = fold_convert_loc (loc, sizetype, len); - len = fold_build2_loc (loc, MIN_EXPR, TREE_TYPE (len), len, len3); - } - rtx arg1_rtx = get_memory_rtx (arg1, len); - rtx arg2_rtx = get_memory_rtx (arg2, len); - rtx arg3_rtx = expand_normal (len); - result = expand_cmpstrn_or_cmpmem (cmpstrn_icode, target, arg1_rtx, - arg2_rtx, TREE_TYPE (len), arg3_rtx, - MIN (arg1_align, arg2_align)); + r_func = expand_normal (t_func); + r_chain = expand_normal (t_chain); - tree fndecl = get_callee_fndecl (exp); - if (result) - { - /* Return the value in the proper mode for this function. */ - mode = TYPE_MODE (TREE_TYPE (exp)); - if (GET_MODE (result) == mode) - return result; - if (target == 0) - return convert_to_mode (mode, result, 0); - convert_move (target, result, 0); - return target; - } + /* Generate insns to initialize the descriptor. */ + emit_move_insn (adjust_address_nv (m_descr, ptr_mode, 0), r_chain); + emit_move_insn (adjust_address_nv (m_descr, ptr_mode, + POINTER_SIZE / BITS_PER_UNIT), r_func); - /* Expand the library call ourselves using a stabilized argument - list to avoid re-evaluating the function's arguments twice. */ - tree call = build_call_nofold_loc (loc, fndecl, 3, arg1, arg2, len); - copy_warning (call, exp); - gcc_assert (TREE_CODE (call) == CALL_EXPR); - CALL_EXPR_TAILCALL (call) = CALL_EXPR_TAILCALL (exp); - return expand_call (call, target, target == const0_rtx); + return const0_rtx; } -/* Expand a call to __builtin_saveregs, generating the result in TARGET, - if that's convenient. */ +/* Expand a call to the builtin descriptor adjustment routine. */ -rtx -expand_builtin_saveregs (void) +static rtx +expand_builtin_adjust_descriptor (tree exp) { - rtx val; - rtx_insn *seq; - - /* Don't do __builtin_saveregs more than once in a function. - Save the result of the first call and reuse it. */ - if (saveregs_value != 0) - return saveregs_value; - - /* When this function is called, it means that registers must be - saved on entry to this function. So we migrate the call to the - first insn of this function. */ - - start_sequence (); - - /* Do whatever the machine needs done in this case. */ - val = targetm.calls.expand_builtin_saveregs (); + rtx tramp; - seq = get_insns (); - end_sequence (); + if (!validate_arglist (exp, POINTER_TYPE, VOID_TYPE)) + return NULL_RTX; - saveregs_value = val; + tramp = expand_normal (CALL_EXPR_ARG (exp, 0)); - /* Put the insns after the NOTE that starts the function. If this - is inside a start_sequence, make the outer-level insn chain current, so - the code is placed at the start of the function. 
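
Viewed from C, the two emit_move_insn calls above write a layout roughly like the following; the struct name is invented purely for exposition:

  struct fn_descriptor
  {
    void *static_chain;   /* written at offset 0 */
    void *code_address;   /* written at offset POINTER_SIZE / BITS_PER_UNIT */
  };
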
*/ - push_topmost_sequence (); - emit_insn_after (seq, entry_of_function ()); - pop_topmost_sequence (); + /* Unalign the descriptor to allow runtime identification. */ + tramp = plus_constant (ptr_mode, tramp, + targetm.calls.custom_function_descriptors); - return val; + return force_operand (tramp, NULL_RTX); } -/* Expand a call to __builtin_next_arg. */ - +/* Expand the call EXP to the built-in signbit, signbitf or signbitl + function. The function first checks whether the back end provides + an insn to implement signbit for the respective mode. If not, it + checks whether the floating point format of the value is such that + the sign bit can be extracted. If that is not the case, error out. + EXP is the expression that is a call to the builtin function; if + convenient, the result should be placed in TARGET. */ static rtx -expand_builtin_next_arg (void) +expand_builtin_signbit (tree exp, rtx target) { - /* Checking arguments is already done in fold_builtin_next_arg - that must be called before this function. */ - return expand_binop (ptr_mode, add_optab, - crtl->args.internal_arg_pointer, - crtl->args.arg_offset_rtx, - NULL_RTX, 0, OPTAB_LIB_WIDEN); -} + const struct real_format *fmt; + scalar_float_mode fmode; + scalar_int_mode rmode, imode; + tree arg; + int word, bitpos; + enum insn_code icode; + rtx temp; + location_t loc = EXPR_LOCATION (exp); -/* Make it easier for the backends by protecting the valist argument - from multiple evaluations. */ + if (!validate_arglist (exp, REAL_TYPE, VOID_TYPE)) + return NULL_RTX; -static tree -stabilize_va_list_loc (location_t loc, tree valist, int needs_lvalue) -{ - tree vatype = targetm.canonical_va_list_type (TREE_TYPE (valist)); + arg = CALL_EXPR_ARG (exp, 0); + fmode = SCALAR_FLOAT_TYPE_MODE (TREE_TYPE (arg)); + rmode = SCALAR_INT_TYPE_MODE (TREE_TYPE (exp)); + fmt = REAL_MODE_FORMAT (fmode); - /* The current way of determining the type of valist is completely - bogus. We should have the information on the va builtin instead. */ - if (!vatype) - vatype = targetm.fn_abi_va_list (cfun->decl); + arg = builtin_save_expr (arg); - if (TREE_CODE (vatype) == ARRAY_TYPE) + /* Expand the argument yielding a RTX expression. */ + temp = expand_normal (arg); + + /* Check if the back end provides an insn that handles signbit for the + argument's mode. */ + icode = optab_handler (signbit_optab, fmode); + if (icode != CODE_FOR_nothing) { - if (TREE_SIDE_EFFECTS (valist)) - valist = save_expr (valist); + rtx_insn *last = get_last_insn (); + target = gen_reg_rtx (TYPE_MODE (TREE_TYPE (exp))); + if (maybe_emit_unop_insn (icode, target, temp, UNKNOWN)) + return target; + delete_insns_since (last); + } - /* For this case, the backends will be expecting a pointer to - vatype, but it's possible we've actually been given an array - (an actual TARGET_CANONICAL_VA_LIST_TYPE (valist)). - So fix it. */ - if (TREE_CODE (TREE_TYPE (valist)) == ARRAY_TYPE) - { - tree p1 = build_pointer_type (TREE_TYPE (vatype)); - valist = build_fold_addr_expr_with_type_loc (loc, valist, p1); - } + /* For floating point formats without a sign bit, implement signbit + as "ARG < 0.0". */ + bitpos = fmt->signbit_ro; + if (bitpos < 0) + { + /* But we can't do this if the format supports signed zero. 
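
The caveat is easy to demonstrate: IEEE comparison treats -0.0 as equal to 0.0, so "arg < 0.0" would lose the sign of a negative zero, while signbit must report it.

  #include <math.h>
  #include <stdio.h>

  int main (void)
  {
    double nz = -0.0;
    printf ("%d %d\n", signbit (nz) != 0, nz < 0.0);   /* prints "1 0" */
    return 0;
  }
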
*/ + gcc_assert (!fmt->has_signed_zero || !HONOR_SIGNED_ZEROS (fmode)); + + arg = fold_build2_loc (loc, LT_EXPR, TREE_TYPE (exp), arg, + build_real (TREE_TYPE (arg), dconst0)); + return expand_expr (arg, target, VOIDmode, EXPAND_NORMAL); + } + + if (GET_MODE_SIZE (fmode) <= UNITS_PER_WORD) + { + imode = int_mode_for_mode (fmode).require (); + temp = gen_lowpart (imode, temp); } else { - tree pt = build_pointer_type (vatype); + imode = word_mode; + /* Handle targets with different FP word orders. */ + if (FLOAT_WORDS_BIG_ENDIAN) + word = (GET_MODE_BITSIZE (fmode) - bitpos) / BITS_PER_WORD; + else + word = bitpos / BITS_PER_WORD; + temp = operand_subword_force (temp, word, fmode); + bitpos = bitpos % BITS_PER_WORD; + } - if (! needs_lvalue) - { - if (! TREE_SIDE_EFFECTS (valist)) - return valist; + /* Force the intermediate word_mode (or narrower) result into a + register. This avoids attempting to create paradoxical SUBREGs + of floating point modes below. */ + temp = force_reg (imode, temp); - valist = fold_build1_loc (loc, ADDR_EXPR, pt, valist); - TREE_SIDE_EFFECTS (valist) = 1; - } + /* If the bitpos is within the "result mode" lowpart, the operation + can be implement with a single bitwise AND. Otherwise, we need + a right shift and an AND. */ - if (TREE_SIDE_EFFECTS (valist)) - valist = save_expr (valist); - valist = fold_build2_loc (loc, MEM_REF, - vatype, valist, build_int_cst (pt, 0)); + if (bitpos < GET_MODE_BITSIZE (rmode)) + { + wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (rmode)); + + if (GET_MODE_SIZE (imode) > GET_MODE_SIZE (rmode)) + temp = gen_lowpart (rmode, temp); + temp = expand_binop (rmode, and_optab, temp, + immed_wide_int_const (mask, rmode), + NULL_RTX, 1, OPTAB_LIB_WIDEN); + } + else + { + /* Perform a logical right shift to place the signbit in the least + significant bit, then truncate the result to the desired mode + and mask just this bit. */ + temp = expand_shift (RSHIFT_EXPR, imode, temp, bitpos, NULL_RTX, 1); + temp = gen_lowpart (rmode, temp); + temp = expand_binop (rmode, and_optab, temp, const1_rtx, + NULL_RTX, 1, OPTAB_LIB_WIDEN); } - return valist; + return temp; } -/* The "standard" definition of va_list is void*. */ +/* Expand fork or exec calls. TARGET is the desired target of the + call. EXP is the call. FN is the + identificator of the actual function. IGNORE is nonzero if the + value is to be ignored. */ -tree -std_build_builtin_va_list (void) +static rtx +expand_builtin_fork_or_exec (tree fn, tree exp, rtx target, int ignore) { - return ptr_type_node; -} + tree id, decl; + tree call; -/* The "standard" abi va_list is va_list_type_node. */ + if (DECL_FUNCTION_CODE (fn) != BUILT_IN_FORK) + { + tree path = CALL_EXPR_ARG (exp, 0); + /* Detect unterminated path. */ + if (!check_read_access (exp, path)) + return NULL_RTX; -tree -std_fn_abi_va_list (tree fndecl ATTRIBUTE_UNUSED) -{ - return va_list_type_node; -} + /* Also detect unterminated first argument. */ + switch (DECL_FUNCTION_CODE (fn)) + { + case BUILT_IN_EXECL: + case BUILT_IN_EXECLE: + case BUILT_IN_EXECLP: + if (!check_read_access (exp, path)) + return NULL_RTX; + default: + break; + } + } -/* The "standard" type of va_list is va_list_type_node. */ -tree -std_canonical_va_list_type (tree type) -{ - tree wtype, htype; + /* If we are not profiling, just call the function. */ + if (!profile_arc_flag) + return NULL_RTX; - wtype = va_list_type_node; - htype = type; + /* Otherwise call the wrapper. 
This should be equivalent for the rest of + compiler, so the code does not diverge, and the wrapper may run the + code necessary for keeping the profiling sane. */ - if (TREE_CODE (wtype) == ARRAY_TYPE) + switch (DECL_FUNCTION_CODE (fn)) { - /* If va_list is an array type, the argument may have decayed - to a pointer type, e.g. by being passed to another function. - In that case, unwrap both types so that we can compare the - underlying records. */ - if (TREE_CODE (htype) == ARRAY_TYPE - || POINTER_TYPE_P (htype)) - { - wtype = TREE_TYPE (wtype); - htype = TREE_TYPE (htype); - } - } - if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype)) - return va_list_type_node; + case BUILT_IN_FORK: + id = get_identifier ("__gcov_fork"); + break; - return NULL_TREE; -} + case BUILT_IN_EXECL: + id = get_identifier ("__gcov_execl"); + break; -/* The "standard" implementation of va_start: just assign `nextarg' to - the variable. */ + case BUILT_IN_EXECV: + id = get_identifier ("__gcov_execv"); + break; -void -std_expand_builtin_va_start (tree valist, rtx nextarg) -{ - rtx va_r = expand_expr (valist, NULL_RTX, VOIDmode, EXPAND_WRITE); - convert_move (va_r, nextarg, 0); -} + case BUILT_IN_EXECLP: + id = get_identifier ("__gcov_execlp"); + break; -/* Expand EXP, a call to __builtin_va_start. */ + case BUILT_IN_EXECLE: + id = get_identifier ("__gcov_execle"); + break; -static rtx -expand_builtin_va_start (tree exp) -{ - rtx nextarg; - tree valist; - location_t loc = EXPR_LOCATION (exp); + case BUILT_IN_EXECVP: + id = get_identifier ("__gcov_execvp"); + break; - if (call_expr_nargs (exp) < 2) - { - error_at (loc, "too few arguments to function %"); - return const0_rtx; - } + case BUILT_IN_EXECVE: + id = get_identifier ("__gcov_execve"); + break; - if (fold_builtin_next_arg (exp, true)) - return const0_rtx; + default: + gcc_unreachable (); + } - nextarg = expand_builtin_next_arg (); - valist = stabilize_va_list_loc (loc, CALL_EXPR_ARG (exp, 0), 1); + decl = build_decl (DECL_SOURCE_LOCATION (fn), + FUNCTION_DECL, id, TREE_TYPE (fn)); + DECL_EXTERNAL (decl) = 1; + TREE_PUBLIC (decl) = 1; + DECL_ARTIFICIAL (decl) = 1; + TREE_NOTHROW (decl) = 1; + DECL_VISIBILITY (decl) = VISIBILITY_DEFAULT; + DECL_VISIBILITY_SPECIFIED (decl) = 1; + call = rewrite_call_expr (EXPR_LOCATION (exp), exp, 0, decl, 0); + return expand_call (call, target, ignore); + } - if (targetm.expand_builtin_va_start) - targetm.expand_builtin_va_start (valist, nextarg); - else - std_expand_builtin_va_start (valist, nextarg); - return const0_rtx; -} + +/* Reconstitute a mode for a __sync intrinsic operation. Since the type of + the pointer in these functions is void*, the tree optimizers may remove + casts. The mode computed in expand_builtin isn't reliable either, due + to __sync_bool_compare_and_swap. -/* Expand EXP, a call to __builtin_va_end. */ + FCODE_DIFF should be fcode - base, where base is the FOO_1 code for the + group of builtins. This gives us log2 of the mode size. */ -static rtx -expand_builtin_va_end (tree exp) +static inline machine_mode +get_builtin_sync_mode (int fcode_diff) { - tree valist = CALL_EXPR_ARG (exp, 0); - - /* Evaluate for side effects, if needed. I hate macros that don't - do that. */ - if (TREE_SIDE_EFFECTS (valist)) - expand_expr (valist, const0_rtx, VOIDmode, EXPAND_NORMAL); - - return const0_rtx; + /* The size is not negotiable, so ask not to get BLKmode in return + if the target indicates that a smaller size would be better. 
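
Because the _1 through _16 variants of each __sync builtin are numbered consecutively, FCODE_DIFF is log2 of the byte size, as the comment above notes, and the width requested just below is BITS_PER_UNIT << FCODE_DIFF. A worked check, assuming 8-bit units:

  #include <assert.h>

  int main (void)
  {
    assert ((8 << 0) == 8);     /* ..._1 variant:  1-byte access  */
    assert ((8 << 2) == 32);    /* ..._4 variant:  4-byte access  */
    assert ((8 << 4) == 128);   /* ..._16 variant: 16-byte access */
    return 0;
  }
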
*/ + return int_mode_for_size (BITS_PER_UNIT << fcode_diff, 0).require (); } -/* Expand EXP, a call to __builtin_va_copy. We do this as a - builtin rather than just as an assignment in stdarg.h because of the - nastiness of array-type va_list types. */ +/* Expand the memory expression LOC and return the appropriate memory operand + for the builtin_sync operations. */ static rtx -expand_builtin_va_copy (tree exp) +get_builtin_sync_mem (tree loc, machine_mode mode) { - tree dst, src, t; - location_t loc = EXPR_LOCATION (exp); + rtx addr, mem; + int addr_space = TYPE_ADDR_SPACE (POINTER_TYPE_P (TREE_TYPE (loc)) + ? TREE_TYPE (TREE_TYPE (loc)) + : TREE_TYPE (loc)); + scalar_int_mode addr_mode = targetm.addr_space.address_mode (addr_space); - dst = CALL_EXPR_ARG (exp, 0); - src = CALL_EXPR_ARG (exp, 1); + addr = expand_expr (loc, NULL_RTX, addr_mode, EXPAND_SUM); + addr = convert_memory_address (addr_mode, addr); - dst = stabilize_va_list_loc (loc, dst, 1); - src = stabilize_va_list_loc (loc, src, 0); + /* Note that we explicitly do not want any alias information for this + memory, so that we kill all other live memories. Otherwise we don't + satisfy the full barrier semantics of the intrinsic. */ + mem = gen_rtx_MEM (mode, addr); - gcc_assert (cfun != NULL && cfun->decl != NULL_TREE); + set_mem_addr_space (mem, addr_space); - if (TREE_CODE (targetm.fn_abi_va_list (cfun->decl)) != ARRAY_TYPE) - { - t = build2 (MODIFY_EXPR, targetm.fn_abi_va_list (cfun->decl), dst, src); - TREE_SIDE_EFFECTS (t) = 1; - expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); - } - else - { - rtx dstb, srcb, size; + mem = validize_mem (mem); - /* Evaluate to pointers. */ - dstb = expand_expr (dst, NULL_RTX, Pmode, EXPAND_NORMAL); - srcb = expand_expr (src, NULL_RTX, Pmode, EXPAND_NORMAL); - size = expand_expr (TYPE_SIZE_UNIT (targetm.fn_abi_va_list (cfun->decl)), - NULL_RTX, VOIDmode, EXPAND_NORMAL); + /* The alignment needs to be at least according to that of the mode. */ + set_mem_align (mem, MAX (GET_MODE_ALIGNMENT (mode), + get_pointer_alignment (loc))); + set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER); + MEM_VOLATILE_P (mem) = 1; - dstb = convert_memory_address (Pmode, dstb); - srcb = convert_memory_address (Pmode, srcb); + return mem; +} - /* "Dereference" to BLKmode memories. */ - dstb = gen_rtx_MEM (BLKmode, dstb); - set_mem_alias_set (dstb, get_alias_set (TREE_TYPE (TREE_TYPE (dst)))); - set_mem_align (dstb, TYPE_ALIGN (targetm.fn_abi_va_list (cfun->decl))); - srcb = gen_rtx_MEM (BLKmode, srcb); - set_mem_alias_set (srcb, get_alias_set (TREE_TYPE (TREE_TYPE (src)))); - set_mem_align (srcb, TYPE_ALIGN (targetm.fn_abi_va_list (cfun->decl))); +/* Make sure an argument is in the right mode. + EXP is the tree argument. + MODE is the mode it should be in. */ - /* Copy. */ - emit_block_move (dstb, srcb, size, BLOCK_OP_NORMAL); +static rtx +expand_expr_force_mode (tree exp, machine_mode mode) +{ + rtx val; + machine_mode old_mode; + + if (TREE_CODE (exp) == SSA_NAME + && TYPE_MODE (TREE_TYPE (exp)) != mode) + { + /* Undo argument promotion if possible, as combine might not + be able to do it later due to MEM_VOLATILE_P uses in the + patterns. 
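
The deliberately conservative alias set and MEM_VOLATILE_P above are what back the documented full-barrier behavior of the __sync family; in user terms:

  int data;
  int ready;

  void producer (void)
  {
    data = 42;
    /* A __sync operation is a full barrier: the store to data cannot
       be reordered past it.  */
    __sync_fetch_and_add (&ready, 1);
  }
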
*/ + gimple *g = get_gimple_for_ssa_name (exp); + if (g && gimple_assign_cast_p (g)) + { + tree rhs = gimple_assign_rhs1 (g); + tree_code code = gimple_assign_rhs_code (g); + if (CONVERT_EXPR_CODE_P (code) + && TYPE_MODE (TREE_TYPE (rhs)) == mode + && INTEGRAL_TYPE_P (TREE_TYPE (exp)) + && INTEGRAL_TYPE_P (TREE_TYPE (rhs)) + && (TYPE_PRECISION (TREE_TYPE (exp)) + > TYPE_PRECISION (TREE_TYPE (rhs)))) + exp = rhs; + } } - return const0_rtx; + val = expand_expr (exp, NULL_RTX, mode, EXPAND_NORMAL); + /* If VAL is promoted to a wider mode, convert it back to MODE. Take care + of CONST_INTs, where we know the old_mode only from the call argument. */ + + old_mode = GET_MODE (val); + if (old_mode == VOIDmode) + old_mode = TYPE_MODE (TREE_TYPE (exp)); + val = convert_modes (mode, old_mode, val, 1); + return val; } -/* Expand a call to one of the builtin functions __builtin_frame_address or - __builtin_return_address. */ + +/* Expand the __sync_xxx_and_fetch and __sync_fetch_and_xxx intrinsics. + EXP is the CALL_EXPR. CODE is the rtx code + that corresponds to the arithmetic or logical operation from the name; + an exception here is that NOT actually means NAND. TARGET is an optional + place for us to store the results; AFTER is true if this is the + fetch_and_xxx form. */ static rtx -expand_builtin_frame_address (tree fndecl, tree exp) +expand_builtin_sync_operation (machine_mode mode, tree exp, + enum rtx_code code, bool after, + rtx target) { - /* The argument must be a nonnegative integer constant. - It counts the number of frames to scan up the stack. - The value is either the frame pointer value or the return - address saved in that frame. */ - if (call_expr_nargs (exp) == 0) - /* Warning about missing arg was already issued. */ - return const0_rtx; - else if (! tree_fits_uhwi_p (CALL_EXPR_ARG (exp, 0))) - { - error ("invalid argument to %qD", fndecl); - return const0_rtx; - } - else + rtx val, mem; + location_t loc = EXPR_LOCATION (exp); + + if (code == NOT && warn_sync_nand) { - /* Number of frames to scan up the stack. */ - unsigned HOST_WIDE_INT count = tree_to_uhwi (CALL_EXPR_ARG (exp, 0)); + tree fndecl = get_callee_fndecl (exp); + enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl); - rtx tem = expand_builtin_return_addr (DECL_FUNCTION_CODE (fndecl), count); + static bool warned_f_a_n, warned_n_a_f; - /* Some ports cannot access arbitrary stack frames. */ - if (tem == NULL) + switch (fcode) { - warning (0, "unsupported argument to %qD", fndecl); - return const0_rtx; - } + case BUILT_IN_SYNC_FETCH_AND_NAND_1: + case BUILT_IN_SYNC_FETCH_AND_NAND_2: + case BUILT_IN_SYNC_FETCH_AND_NAND_4: + case BUILT_IN_SYNC_FETCH_AND_NAND_8: + case BUILT_IN_SYNC_FETCH_AND_NAND_16: + if (warned_f_a_n) + break; - if (count) - { - /* Warn since no effort is made to ensure that any frame - beyond the current one exists or can be safely reached. */ - warning (OPT_Wframe_address, "calling %qD with " - "a nonzero argument is unsafe", fndecl); - } + fndecl = builtin_decl_implicit (BUILT_IN_SYNC_FETCH_AND_NAND_N); + inform (loc, "%qD changed semantics in GCC 4.4", fndecl); + warned_f_a_n = true; + break; - /* For __builtin_frame_address, return what we've got. */ - if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_FRAME_ADDRESS) - return tem; + case BUILT_IN_SYNC_NAND_AND_FETCH_1: + case BUILT_IN_SYNC_NAND_AND_FETCH_2: + case BUILT_IN_SYNC_NAND_AND_FETCH_4: + case BUILT_IN_SYNC_NAND_AND_FETCH_8: + case BUILT_IN_SYNC_NAND_AND_FETCH_16: + if (warned_n_a_f) + break; - if (!REG_P (tem) - && ! 
CONSTANT_P (tem)) - tem = copy_addr_to_reg (tem); - return tem; + fndecl = builtin_decl_implicit (BUILT_IN_SYNC_NAND_AND_FETCH_N); + inform (loc, "%qD changed semantics in GCC 4.4", fndecl); + warned_n_a_f = true; + break; + + default: + gcc_unreachable (); + } } + + /* Expand the operands. */ + mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode); + val = expand_expr_force_mode (CALL_EXPR_ARG (exp, 1), mode); + + return expand_atomic_fetch_op (target, mem, val, code, MEMMODEL_SYNC_SEQ_CST, + after); } -/* Expand EXP, a call to the alloca builtin. Return NULL_RTX if we - failed and the caller should emit a normal call. */ +/* Expand the __sync_val_compare_and_swap and __sync_bool_compare_and_swap + intrinsics. EXP is the CALL_EXPR. IS_BOOL is + true if this is the boolean form. TARGET is a place for us to store the + results; this is NOT optional if IS_BOOL is true. */ static rtx -expand_builtin_alloca (tree exp) +expand_builtin_compare_and_swap (machine_mode mode, tree exp, + bool is_bool, rtx target) { - rtx op0; - rtx result; - unsigned int align; - tree fndecl = get_callee_fndecl (exp); - HOST_WIDE_INT max_size; - enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl); - bool alloca_for_var = CALL_ALLOCA_FOR_VAR_P (exp); - bool valid_arglist - = (fcode == BUILT_IN_ALLOCA_WITH_ALIGN_AND_MAX - ? validate_arglist (exp, INTEGER_TYPE, INTEGER_TYPE, INTEGER_TYPE, - VOID_TYPE) - : fcode == BUILT_IN_ALLOCA_WITH_ALIGN - ? validate_arglist (exp, INTEGER_TYPE, INTEGER_TYPE, VOID_TYPE) - : validate_arglist (exp, INTEGER_TYPE, VOID_TYPE)); + rtx old_val, new_val, mem; + rtx *pbool, *poval; - if (!valid_arglist) - return NULL_RTX; + /* Expand the operands. */ + mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode); + old_val = expand_expr_force_mode (CALL_EXPR_ARG (exp, 1), mode); + new_val = expand_expr_force_mode (CALL_EXPR_ARG (exp, 2), mode); - if ((alloca_for_var - && warn_vla_limit >= HOST_WIDE_INT_MAX - && warn_alloc_size_limit < warn_vla_limit) - || (!alloca_for_var - && warn_alloca_limit >= HOST_WIDE_INT_MAX - && warn_alloc_size_limit < warn_alloca_limit - )) + pbool = poval = NULL; + if (target != const0_rtx) { - /* -Walloca-larger-than and -Wvla-larger-than settings of - less than HOST_WIDE_INT_MAX override the more general - -Walloc-size-larger-than so unless either of the former - options is smaller than the last one (wchich would imply - that the call was already checked), check the alloca - arguments for overflow. */ - tree args[] = { CALL_EXPR_ARG (exp, 0), NULL_TREE }; - int idx[] = { 0, -1 }; - maybe_warn_alloc_args_overflow (fndecl, exp, args, idx); + if (is_bool) + pbool = ⌖ + else + poval = ⌖ } + if (!expand_atomic_compare_and_swap (pbool, poval, mem, old_val, new_val, + false, MEMMODEL_SYNC_SEQ_CST, + MEMMODEL_SYNC_SEQ_CST)) + return NULL_RTX; - /* Compute the argument. */ - op0 = expand_normal (CALL_EXPR_ARG (exp, 0)); - - /* Compute the alignment. */ - align = (fcode == BUILT_IN_ALLOCA - ? BIGGEST_ALIGNMENT - : TREE_INT_CST_LOW (CALL_EXPR_ARG (exp, 1))); - - /* Compute the maximum size. */ - max_size = (fcode == BUILT_IN_ALLOCA_WITH_ALIGN_AND_MAX - ? TREE_INT_CST_LOW (CALL_EXPR_ARG (exp, 2)) - : -1); - - /* Allocate the desired space. If the allocation stems from the declaration - of a variable-sized object, it cannot accumulate. */ - result - = allocate_dynamic_stack_space (op0, 0, align, max_size, alloca_for_var); - result = convert_memory_address (ptr_mode, result); - - /* Dynamic allocations for variables are recorded during gimplification. 
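
The inform() calls above flag the GCC 4.4 change in NAND semantics: __sync_fetch_and_nand now computes ~(old & val) rather than the pre-4.4 (~old) & val. A quick check of the current behavior (compiling it may itself emit the "changed semantics" note):

  #include <assert.h>

  int main (void)
  {
    unsigned v = 0xF0u;
    unsigned old = __sync_fetch_and_nand (&v, 0x3Cu);
    assert (old == 0xF0u);
    assert (v == ~(0xF0u & 0x3Cu));   /* ~0x30u, not (~0xF0u) & 0x3Cu */
    return 0;
  }
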
*/ - if (!alloca_for_var && (flag_callgraph_info & CALLGRAPH_INFO_DYNAMIC_ALLOC)) - record_dynamic_alloc (exp); - - return result; + return target; } -/* Emit a call to __asan_allocas_unpoison call in EXP. Add to second argument - of the call virtual_stack_dynamic_rtx - stack_pointer_rtx, which is the - STACK_DYNAMIC_OFFSET value. See motivation for this in comment to - handle_builtin_stack_restore function. */ +/* Expand the __sync_lock_test_and_set intrinsic. Note that the most + general form is actually an atomic exchange, and some targets only + support a reduced form with the second argument being a constant 1. + EXP is the CALL_EXPR; TARGET is an optional place for us to store + the results. */ static rtx -expand_asan_emit_allocas_unpoison (tree exp) +expand_builtin_sync_lock_test_and_set (machine_mode mode, tree exp, + rtx target) { - tree arg0 = CALL_EXPR_ARG (exp, 0); - tree arg1 = CALL_EXPR_ARG (exp, 1); - rtx top = expand_expr (arg0, NULL_RTX, ptr_mode, EXPAND_NORMAL); - rtx bot = expand_expr (arg1, NULL_RTX, ptr_mode, EXPAND_NORMAL); - rtx off = expand_simple_binop (Pmode, MINUS, virtual_stack_dynamic_rtx, - stack_pointer_rtx, NULL_RTX, 0, - OPTAB_LIB_WIDEN); - off = convert_modes (ptr_mode, Pmode, off, 0); - bot = expand_simple_binop (ptr_mode, PLUS, bot, off, NULL_RTX, 0, - OPTAB_LIB_WIDEN); - rtx ret = init_one_libfunc ("__asan_allocas_unpoison"); - ret = emit_library_call_value (ret, NULL_RTX, LCT_NORMAL, ptr_mode, - top, ptr_mode, bot, ptr_mode); - return ret; + rtx val, mem; + + /* Expand the operands. */ + mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode); + val = expand_expr_force_mode (CALL_EXPR_ARG (exp, 1), mode); + + return expand_sync_lock_test_and_set (target, mem, val); } -/* Expand a call to bswap builtin in EXP. - Return NULL_RTX if a normal call should be emitted rather than expanding the - function in-line. If convenient, the result should be placed in TARGET. - SUBTARGET may be used as the target for computing one of EXP's operands. */ +/* Expand the __sync_lock_release intrinsic. EXP is the CALL_EXPR. */ -static rtx -expand_builtin_bswap (machine_mode target_mode, tree exp, rtx target, - rtx subtarget) +static void +expand_builtin_sync_lock_release (machine_mode mode, tree exp) { - tree arg; - rtx op0; + rtx mem; - if (!validate_arglist (exp, INTEGER_TYPE, VOID_TYPE)) - return NULL_RTX; + /* Expand the operands. */ + mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode); - arg = CALL_EXPR_ARG (exp, 0); - op0 = expand_expr (arg, - subtarget && GET_MODE (subtarget) == target_mode - ? subtarget : NULL_RTX, - target_mode, EXPAND_NORMAL); - if (GET_MODE (op0) != target_mode) - op0 = convert_to_mode (target_mode, op0, 1); - - target = expand_unop (target_mode, bswap_optab, op0, target, 1); - - gcc_assert (target); - - return convert_to_mode (target_mode, target, 1); + expand_atomic_store (mem, const0_rtx, MEMMODEL_SYNC_RELEASE, true); } -/* Expand a call to a unary builtin in EXP. - Return NULL_RTX if a normal call should be emitted rather than expanding the - function in-line. If convenient, the result should be placed in TARGET. - SUBTARGET may be used as the target for computing one of EXP's operands. */ +/* Given an integer representing an ``enum memmodel'', verify its + correctness and return the memory model enum. 
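
get_memmodel, whose contract is stated above and whose body follows, honors a well-formed constant model, strengthens __ATOMIC_CONSUME to acquire (the PR 59448 workaround below), and degrades malformed or non-constant models to seq_cst under -Winvalid-memory-model. In user terms:

  int x;

  int load_it (void)
  {
    /* Written as consume, expanded as acquire per the promotion below.  */
    return __atomic_load_n (&x, __ATOMIC_CONSUME);
  }
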
*/ -static rtx -expand_builtin_unop (machine_mode target_mode, tree exp, rtx target, - rtx subtarget, optab op_optab) +static enum memmodel +get_memmodel (tree exp) { - rtx op0; - - if (!validate_arglist (exp, INTEGER_TYPE, VOID_TYPE)) - return NULL_RTX; + rtx op; + unsigned HOST_WIDE_INT val; + location_t loc + = expansion_point_location_if_in_system_header (input_location); - /* Compute the argument. */ - op0 = expand_expr (CALL_EXPR_ARG (exp, 0), - (subtarget - && (TYPE_MODE (TREE_TYPE (CALL_EXPR_ARG (exp, 0))) - == GET_MODE (subtarget))) ? subtarget : NULL_RTX, - VOIDmode, EXPAND_NORMAL); - /* Compute op, into TARGET if possible. - Set TARGET to wherever the result comes back. */ - target = expand_unop (TYPE_MODE (TREE_TYPE (CALL_EXPR_ARG (exp, 0))), - op_optab, op0, target, op_optab != clrsb_optab); - gcc_assert (target); + /* If the parameter is not a constant, it's a run time value so we'll just + convert it to MEMMODEL_SEQ_CST to avoid annoying runtime checking. */ + if (TREE_CODE (exp) != INTEGER_CST) + return MEMMODEL_SEQ_CST; - return convert_to_mode (target_mode, target, 0); -} + op = expand_normal (exp); -/* Expand a call to __builtin_expect. We just return our argument - as the builtin_expect semantic should've been already executed by - tree branch prediction pass. */ + val = INTVAL (op); + if (targetm.memmodel_check) + val = targetm.memmodel_check (val); + else if (val & ~MEMMODEL_MASK) + { + warning_at (loc, OPT_Winvalid_memory_model, + "unknown architecture specifier in memory model to builtin"); + return MEMMODEL_SEQ_CST; + } -static rtx -expand_builtin_expect (tree exp, rtx target) -{ - tree arg; + /* Should never see a user explicit SYNC memodel model, so >= LAST works. */ + if (memmodel_base (val) >= MEMMODEL_LAST) + { + warning_at (loc, OPT_Winvalid_memory_model, + "invalid memory model argument to builtin"); + return MEMMODEL_SEQ_CST; + } - if (call_expr_nargs (exp) < 2) - return const0_rtx; - arg = CALL_EXPR_ARG (exp, 0); + /* Workaround for Bugzilla 59448. GCC doesn't track consume properly, so + be conservative and promote consume to acquire. */ + if (val == MEMMODEL_CONSUME) + val = MEMMODEL_ACQUIRE; - target = expand_expr (arg, target, VOIDmode, EXPAND_NORMAL); - /* When guessing was done, the hints should be already stripped away. */ - gcc_assert (!flag_guess_branch_prob - || optimize == 0 || seen_error ()); - return target; + return (enum memmodel) val; } -/* Expand a call to __builtin_expect_with_probability. We just return our - argument as the builtin_expect semantic should've been already executed by - tree branch prediction pass. */ +/* Expand the __atomic_exchange intrinsic: + TYPE __atomic_exchange (TYPE *object, TYPE desired, enum memmodel) + EXP is the CALL_EXPR. + TARGET is an optional place for us to store the results. */ static rtx -expand_builtin_expect_with_probability (tree exp, rtx target) +expand_builtin_atomic_exchange (machine_mode mode, tree exp, rtx target) { - tree arg; + rtx val, mem; + enum memmodel model; - if (call_expr_nargs (exp) < 3) - return const0_rtx; - arg = CALL_EXPR_ARG (exp, 0); + model = get_memmodel (CALL_EXPR_ARG (exp, 2)); - target = expand_expr (arg, target, VOIDmode, EXPAND_NORMAL); - /* When guessing was done, the hints should be already stripped away. */ - gcc_assert (!flag_guess_branch_prob - || optimize == 0 || seen_error ()); - return target; -} + if (!flag_inline_atomics) + return NULL_RTX; + + /* Expand the operands. 
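
A common consumer of the exchange expansion entered above is a test-and-set style spinlock; a minimal sketch with invented names:

  int lock_word;

  void spin_lock (void)
  {
    while (__atomic_exchange_n (&lock_word, 1, __ATOMIC_ACQUIRE))
      ;   /* spin until the previous value was 0 */
  }

  void spin_unlock (void)
  {
    __atomic_store_n (&lock_word, 0, __ATOMIC_RELEASE);
  }
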
*/ + mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode); + val = expand_expr_force_mode (CALL_EXPR_ARG (exp, 1), mode); + return expand_atomic_exchange (target, mem, val, model); +} -/* Expand a call to __builtin_assume_aligned. We just return our first - argument as the builtin_assume_aligned semantic should've been already - executed by CCP. */ +/* Expand the __atomic_compare_exchange intrinsic: + bool __atomic_compare_exchange (TYPE *object, TYPE *expect, + TYPE desired, BOOL weak, + enum memmodel success, + enum memmodel failure) + EXP is the CALL_EXPR. + TARGET is an optional place for us to store the results. */ static rtx -expand_builtin_assume_aligned (tree exp, rtx target) +expand_builtin_atomic_compare_exchange (machine_mode mode, tree exp, + rtx target) { - if (call_expr_nargs (exp) < 2) - return const0_rtx; - target = expand_expr (CALL_EXPR_ARG (exp, 0), target, VOIDmode, - EXPAND_NORMAL); - gcc_assert (!TREE_SIDE_EFFECTS (CALL_EXPR_ARG (exp, 1)) - && (call_expr_nargs (exp) < 3 - || !TREE_SIDE_EFFECTS (CALL_EXPR_ARG (exp, 2)))); - return target; -} + rtx expect, desired, mem, oldval; + rtx_code_label *label; + enum memmodel success, failure; + tree weak; + bool is_weak; + location_t loc + = expansion_point_location_if_in_system_header (input_location); -void -expand_builtin_trap (void) -{ - if (targetm.have_trap ()) + success = get_memmodel (CALL_EXPR_ARG (exp, 4)); + failure = get_memmodel (CALL_EXPR_ARG (exp, 5)); + + if (failure > success) { - rtx_insn *insn = emit_insn (targetm.gen_trap ()); - /* For trap insns when not accumulating outgoing args force - REG_ARGS_SIZE note to prevent crossjumping of calls with - different args sizes. */ - if (!ACCUMULATE_OUTGOING_ARGS) - add_args_size_note (insn, stack_pointer_delta); + warning_at (loc, OPT_Winvalid_memory_model, + "failure memory model cannot be stronger than success " + "memory model for %<__atomic_compare_exchange%>"); + success = MEMMODEL_SEQ_CST; } - else + + if (is_mm_release (failure) || is_mm_acq_rel (failure)) { - tree fn = builtin_decl_implicit (BUILT_IN_ABORT); - tree call_expr = build_call_expr (fn, 0); - expand_call (call_expr, NULL_RTX, false); + warning_at (loc, OPT_Winvalid_memory_model, + "invalid failure memory model for " + "%<__atomic_compare_exchange%>"); + failure = MEMMODEL_SEQ_CST; + success = MEMMODEL_SEQ_CST; } - emit_barrier (); -} + + if (!flag_inline_atomics) + return NULL_RTX; -/* Expand a call to __builtin_unreachable. We do nothing except emit - a barrier saying that control flow will not pass here. + /* Expand the operands. */ + mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode); - It is the responsibility of the program being compiled to ensure - that control flow does never reach __builtin_unreachable. */ -static void -expand_builtin_unreachable (void) -{ - emit_barrier (); -} + expect = expand_normal (CALL_EXPR_ARG (exp, 1)); + expect = convert_memory_address (Pmode, expect); + expect = gen_rtx_MEM (mode, expect); + desired = expand_expr_force_mode (CALL_EXPR_ARG (exp, 2), mode); -/* Expand EXP, a call to fabs, fabsf or fabsl. - Return NULL_RTX if a normal call should be emitted rather than expanding - the function inline. If convenient, the result should be placed - in TARGET. SUBTARGET may be used as the target for computing - the operand. 
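
The two warnings above enforce the language rules for compare-exchange: the failure order may be neither stronger than the success order nor a releasing order. A well-formed strong CAS for reference, with try_update an invented name:

  int v;

  int try_update (int expected, int desired)
  {
    return __atomic_compare_exchange_n (&v, &expected, desired,
                                        0 /* strong */,
                                        __ATOMIC_ACQ_REL,    /* success */
                                        __ATOMIC_ACQUIRE);   /* failure */
  }
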
*/ + weak = CALL_EXPR_ARG (exp, 3); + is_weak = false; + if (tree_fits_shwi_p (weak) && tree_to_shwi (weak) != 0) + is_weak = true; -static rtx -expand_builtin_fabs (tree exp, rtx target, rtx subtarget) -{ - machine_mode mode; - tree arg; - rtx op0; + if (target == const0_rtx) + target = NULL; - if (!validate_arglist (exp, REAL_TYPE, VOID_TYPE)) + /* Lest the rtl backend create a race condition with an imporoper store + to memory, always create a new pseudo for OLDVAL. */ + oldval = NULL; + + if (!expand_atomic_compare_and_swap (&target, &oldval, mem, expect, desired, + is_weak, success, failure)) return NULL_RTX; - arg = CALL_EXPR_ARG (exp, 0); - CALL_EXPR_ARG (exp, 0) = arg = builtin_save_expr (arg); - mode = TYPE_MODE (TREE_TYPE (arg)); - op0 = expand_expr (arg, subtarget, VOIDmode, EXPAND_NORMAL); - return expand_abs (mode, op0, target, 0, safe_from_p (target, arg, 1)); + /* Conditionally store back to EXPECT, lest we create a race condition + with an improper store to memory. */ + /* ??? With a rearrangement of atomics at the gimple level, we can handle + the normal case where EXPECT is totally private, i.e. a register. At + which point the store can be unconditional. */ + label = gen_label_rtx (); + emit_cmp_and_jump_insns (target, const0_rtx, NE, NULL, + GET_MODE (target), 1, label); + emit_move_insn (expect, oldval); + emit_label (label); + + return target; } -/* Expand EXP, a call to copysign, copysignf, or copysignl. - Return NULL is a normal call should be emitted rather than expanding the - function inline. If convenient, the result should be placed in TARGET. - SUBTARGET may be used as the target for computing the operand. */ +/* Helper function for expand_ifn_atomic_compare_exchange - expand + internal ATOMIC_COMPARE_EXCHANGE call into __atomic_compare_exchange_N + call. The weak parameter must be dropped to match the expected parameter + list and the expected argument changed from value to pointer to memory + slot. */ -static rtx -expand_builtin_copysign (tree exp, rtx target, rtx subtarget) +static void +expand_ifn_atomic_compare_exchange_into_call (gcall *call, machine_mode mode) { - rtx op0, op1; - tree arg; - - if (!validate_arglist (exp, REAL_TYPE, REAL_TYPE, VOID_TYPE)) - return NULL_RTX; + unsigned int z; + vec *vec; - arg = CALL_EXPR_ARG (exp, 0); - op0 = expand_expr (arg, subtarget, VOIDmode, EXPAND_NORMAL); - - arg = CALL_EXPR_ARG (exp, 1); - op1 = expand_normal (arg); - - return expand_copysign (op0, op1, target); + vec_alloc (vec, 5); + vec->quick_push (gimple_call_arg (call, 0)); + tree expected = gimple_call_arg (call, 1); + rtx x = assign_stack_temp_for_type (mode, GET_MODE_SIZE (mode), + TREE_TYPE (expected)); + rtx expd = expand_expr (expected, x, mode, EXPAND_NORMAL); + if (expd != x) + emit_move_insn (x, expd); + tree v = make_tree (TREE_TYPE (expected), x); + vec->quick_push (build1 (ADDR_EXPR, + build_pointer_type (TREE_TYPE (expected)), v)); + vec->quick_push (gimple_call_arg (call, 2)); + /* Skip the boolean weak parameter. */ + for (z = 4; z < 6; z++) + vec->quick_push (gimple_call_arg (call, z)); + /* At present we only have BUILT_IN_ATOMIC_COMPARE_EXCHANGE_{1,2,4,8,16}. 
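
The conditional store back through EXPECT above is what makes the idiomatic retry loop cheap: on failure the builtin hands back the value actually observed, so the next iteration needs no reload. A sketch with invented names:

  int total;

  void add_via_cas (int amount)
  {
    int expected = __atomic_load_n (&total, __ATOMIC_RELAXED);
    while (!__atomic_compare_exchange_n (&total, &expected,
                                         expected + amount, 1 /* weak */,
                                         __ATOMIC_SEQ_CST,
                                         __ATOMIC_RELAXED))
      ;   /* expected was refreshed with the observed value; retry */
  }
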
*/ + unsigned int bytes_log2 = exact_log2 (GET_MODE_SIZE (mode).to_constant ()); + gcc_assert (bytes_log2 < 5); + built_in_function fncode + = (built_in_function) ((int) BUILT_IN_ATOMIC_COMPARE_EXCHANGE_1 + + bytes_log2); + tree fndecl = builtin_decl_explicit (fncode); + tree fn = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (fndecl)), + fndecl); + tree exp = build_call_vec (boolean_type_node, fn, vec); + tree lhs = gimple_call_lhs (call); + rtx boolret = expand_call (exp, NULL_RTX, lhs == NULL_TREE); + if (lhs) + { + rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE); + if (GET_MODE (boolret) != mode) + boolret = convert_modes (mode, GET_MODE (boolret), boolret, 1); + x = force_reg (mode, x); + write_complex_part (target, boolret, true); + write_complex_part (target, x, false); + } } -/* Emit a call to __builtin___clear_cache. */ +/* Expand IFN_ATOMIC_COMPARE_EXCHANGE internal function. */ void -default_emit_call_builtin___clear_cache (rtx begin, rtx end) +expand_ifn_atomic_compare_exchange (gcall *call) { - rtx callee = gen_rtx_SYMBOL_REF (Pmode, - BUILTIN_ASM_NAME_PTR - (BUILT_IN_CLEAR_CACHE)); + int size = tree_to_shwi (gimple_call_arg (call, 3)) & 255; + gcc_assert (size == 1 || size == 2 || size == 4 || size == 8 || size == 16); + machine_mode mode = int_mode_for_size (BITS_PER_UNIT * size, 0).require (); + rtx expect, desired, mem, oldval, boolret; + enum memmodel success, failure; + tree lhs; + bool is_weak; + location_t loc + = expansion_point_location_if_in_system_header (gimple_location (call)); - emit_library_call (callee, - LCT_NORMAL, VOIDmode, - convert_memory_address (ptr_mode, begin), ptr_mode, - convert_memory_address (ptr_mode, end), ptr_mode); -} + success = get_memmodel (gimple_call_arg (call, 4)); + failure = get_memmodel (gimple_call_arg (call, 5)); -/* Emit a call to __builtin___clear_cache, unless the target specifies - it as do-nothing. This function can be used by trampoline - finalizers to duplicate the effects of expanding a call to the - clear_cache builtin. */ + if (failure > success) + { + warning_at (loc, OPT_Winvalid_memory_model, + "failure memory model cannot be stronger than success " + "memory model for %<__atomic_compare_exchange%>"); + success = MEMMODEL_SEQ_CST; + } -void -maybe_emit_call_builtin___clear_cache (rtx begin, rtx end) -{ - if ((GET_MODE (begin) != ptr_mode && GET_MODE (begin) != Pmode) - || (GET_MODE (end) != ptr_mode && GET_MODE (end) != Pmode)) + if (is_mm_release (failure) || is_mm_acq_rel (failure)) { - error ("both arguments to %<__builtin___clear_cache%> must be pointers"); - return; + warning_at (loc, OPT_Winvalid_memory_model, + "invalid failure memory model for " + "%<__atomic_compare_exchange%>"); + failure = MEMMODEL_SEQ_CST; + success = MEMMODEL_SEQ_CST; } - if (targetm.have_clear_cache ()) + if (!flag_inline_atomics) { - /* We have a "clear_cache" insn, and it will handle everything. */ - class expand_operand ops[2]; + expand_ifn_atomic_compare_exchange_into_call (call, mode); + return; + } - create_address_operand (&ops[0], begin); - create_address_operand (&ops[1], end); + /* Expand the operands. 
*/ + mem = get_builtin_sync_mem (gimple_call_arg (call, 0), mode); - if (maybe_expand_insn (targetm.code_for_clear_cache, 2, ops)) - return; - } - else + expect = expand_expr_force_mode (gimple_call_arg (call, 1), mode); + desired = expand_expr_force_mode (gimple_call_arg (call, 2), mode); + + is_weak = (tree_to_shwi (gimple_call_arg (call, 3)) & 256) != 0; + + boolret = NULL; + oldval = NULL; + + if (!expand_atomic_compare_and_swap (&boolret, &oldval, mem, expect, desired, + is_weak, success, failure)) { -#ifndef CLEAR_INSN_CACHE - /* There is no "clear_cache" insn, and __clear_cache() in libgcc - does nothing. There is no need to call it. Do nothing. */ + expand_ifn_atomic_compare_exchange_into_call (call, mode); return; -#endif /* CLEAR_INSN_CACHE */ } - targetm.calls.emit_call_builtin___clear_cache (begin, end); + lhs = gimple_call_lhs (call); + if (lhs) + { + rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE); + if (GET_MODE (boolret) != mode) + boolret = convert_modes (mode, GET_MODE (boolret), boolret, 1); + write_complex_part (target, boolret, true); + write_complex_part (target, oldval, false); + } } -/* Expand a call to __builtin___clear_cache. */ +/* Expand the __atomic_load intrinsic: + TYPE __atomic_load (TYPE *object, enum memmodel) + EXP is the CALL_EXPR. + TARGET is an optional place for us to store the results. */ -static void -expand_builtin___clear_cache (tree exp) +static rtx +expand_builtin_atomic_load (machine_mode mode, tree exp, rtx target) { - tree begin, end; - rtx begin_rtx, end_rtx; + rtx mem; + enum memmodel model; - /* We must not expand to a library call. If we did, any - fallback library function in libgcc that might contain a call to - __builtin___clear_cache() would recurse infinitely. */ - if (!validate_arglist (exp, POINTER_TYPE, POINTER_TYPE, VOID_TYPE)) + model = get_memmodel (CALL_EXPR_ARG (exp, 1)); + if (is_mm_release (model) || is_mm_acq_rel (model)) { - error ("both arguments to %<__builtin___clear_cache%> must be pointers"); - return; + location_t loc + = expansion_point_location_if_in_system_header (input_location); + warning_at (loc, OPT_Winvalid_memory_model, + "invalid memory model for %<__atomic_load%>"); + model = MEMMODEL_SEQ_CST; } - begin = CALL_EXPR_ARG (exp, 0); - begin_rtx = expand_expr (begin, NULL_RTX, Pmode, EXPAND_NORMAL); + if (!flag_inline_atomics) + return NULL_RTX; - end = CALL_EXPR_ARG (exp, 1); - end_rtx = expand_expr (end, NULL_RTX, Pmode, EXPAND_NORMAL); + /* Expand the operand. */ + mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode); - maybe_emit_call_builtin___clear_cache (begin_rtx, end_rtx); + return expand_atomic_load (target, mem, model); } -/* Given a trampoline address, make sure it satisfies TRAMPOLINE_ALIGNMENT. */ + +/* Expand the __atomic_store intrinsic: + void __atomic_store (TYPE *object, TYPE desired, enum memmodel) + EXP is the CALL_EXPR. + TARGET is an optional place for us to store the results. */ static rtx -round_trampoline_addr (rtx tramp) +expand_builtin_atomic_store (machine_mode mode, tree exp) { - rtx temp, addend, mask; + rtx mem, val; + enum memmodel model; - /* If we don't need too much alignment, we'll have been guaranteed - proper alignment by get_trampoline_type. 
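
Taken together, the two checks above implement the C11 restrictions: a load may not use a releasing order, and a store may not use an acquiring one. The canonical pairing, for reference:

  int payload;
  int flag;

  void publish (void)
  {
    payload = 1;
    __atomic_store_n (&flag, 1, __ATOMIC_RELEASE);   /* valid store order */
  }

  int observe (void)
  {
    if (__atomic_load_n (&flag, __ATOMIC_ACQUIRE))   /* valid load order */
      return payload;
    return 0;
  }
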
*/ - if (TRAMPOLINE_ALIGNMENT <= STACK_BOUNDARY) - return tramp; + model = get_memmodel (CALL_EXPR_ARG (exp, 2)); + if (!(is_mm_relaxed (model) || is_mm_seq_cst (model) + || is_mm_release (model))) + { + location_t loc + = expansion_point_location_if_in_system_header (input_location); + warning_at (loc, OPT_Winvalid_memory_model, + "invalid memory model for %<__atomic_store%>"); + model = MEMMODEL_SEQ_CST; + } - /* Round address up to desired boundary. */ - temp = gen_reg_rtx (Pmode); - addend = gen_int_mode (TRAMPOLINE_ALIGNMENT / BITS_PER_UNIT - 1, Pmode); - mask = gen_int_mode (-TRAMPOLINE_ALIGNMENT / BITS_PER_UNIT, Pmode); + if (!flag_inline_atomics) + return NULL_RTX; - temp = expand_simple_binop (Pmode, PLUS, tramp, addend, - temp, 0, OPTAB_LIB_WIDEN); - tramp = expand_simple_binop (Pmode, AND, temp, mask, - temp, 0, OPTAB_LIB_WIDEN); + /* Expand the operands. */ + mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode); + val = expand_expr_force_mode (CALL_EXPR_ARG (exp, 1), mode); - return tramp; + return expand_atomic_store (mem, val, model, false); } +/* Expand the __atomic_fetch_XXX intrinsic: + TYPE __atomic_fetch_XXX (TYPE *object, TYPE val, enum memmodel) + EXP is the CALL_EXPR. + TARGET is an optional place for us to store the results. + CODE is the operation, PLUS, MINUS, ADD, XOR, or IOR. + FETCH_AFTER is true if returning the result of the operation. + FETCH_AFTER is false if returning the value before the operation. + IGNORE is true if the result is not used. + EXT_CALL is the correct builtin for an external call if this cannot be + resolved to an instruction sequence. */ + static rtx -expand_builtin_init_trampoline (tree exp, bool onstack) +expand_builtin_atomic_fetch_op (machine_mode mode, tree exp, rtx target, + enum rtx_code code, bool fetch_after, + bool ignore, enum built_in_function ext_call) { - tree t_tramp, t_func, t_chain; - rtx m_tramp, r_tramp, r_chain, tmp; + rtx val, mem, ret; + enum memmodel model; + tree fndecl; + tree addr; - if (!validate_arglist (exp, POINTER_TYPE, POINTER_TYPE, - POINTER_TYPE, VOID_TYPE)) - return NULL_RTX; + model = get_memmodel (CALL_EXPR_ARG (exp, 2)); - t_tramp = CALL_EXPR_ARG (exp, 0); - t_func = CALL_EXPR_ARG (exp, 1); - t_chain = CALL_EXPR_ARG (exp, 2); + /* Expand the operands. */ + mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode); + val = expand_expr_force_mode (CALL_EXPR_ARG (exp, 1), mode); - r_tramp = expand_normal (t_tramp); - m_tramp = gen_rtx_MEM (BLKmode, r_tramp); - MEM_NOTRAP_P (m_tramp) = 1; + /* Only try generating instructions if inlining is turned on. */ + if (flag_inline_atomics) + { + ret = expand_atomic_fetch_op (target, mem, val, code, model, fetch_after); + if (ret) + return ret; + } - /* If ONSTACK, the TRAMP argument should be the address of a field - within the local function's FRAME decl. Either way, let's see if - we can fill in the MEM_ATTRs for this memory. */ - if (TREE_CODE (t_tramp) == ADDR_EXPR) - set_mem_attributes (m_tramp, TREE_OPERAND (t_tramp, 0), true); + /* Return if a different routine isn't needed for the library call. */ + if (ext_call == BUILT_IN_NONE) + return NULL_RTX; - /* Creator of a heap trampoline is responsible for making sure the - address is aligned to at least STACK_BOUNDARY. Normally malloc - will ensure this anyhow. 
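
FETCH_AFTER in the interface above distinguishes the two user-visible flavours of every operation, as in this sketch:

  unsigned n;

  void flavours (void)
  {
    unsigned before = __atomic_fetch_add (&n, 1, __ATOMIC_SEQ_CST);
    unsigned after  = __atomic_add_fetch (&n, 1, __ATOMIC_SEQ_CST);
    /* fetch_add returned the pre-increment value; add_fetch returned
       the post-increment one.  */
    (void) before;
    (void) after;
  }
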
*/ - tmp = round_trampoline_addr (r_tramp); - if (tmp != r_tramp) - { - m_tramp = change_address (m_tramp, BLKmode, tmp); - set_mem_align (m_tramp, TRAMPOLINE_ALIGNMENT); - set_mem_size (m_tramp, TRAMPOLINE_SIZE); - } + /* Change the call to the specified function. */ + fndecl = get_callee_fndecl (exp); + addr = CALL_EXPR_FN (exp); + STRIP_NOPS (addr); - /* The FUNC argument should be the address of the nested function. - Extract the actual function decl to pass to the hook. */ - gcc_assert (TREE_CODE (t_func) == ADDR_EXPR); - t_func = TREE_OPERAND (t_func, 0); - gcc_assert (TREE_CODE (t_func) == FUNCTION_DECL); + gcc_assert (TREE_OPERAND (addr, 0) == fndecl); + TREE_OPERAND (addr, 0) = builtin_decl_explicit (ext_call); - r_chain = expand_normal (t_chain); + /* If we will emit code after the call, the call cannot be a tail call. + If it is emitted as a tail call, a barrier is emitted after it, and + then all trailing code is removed. */ + if (!ignore) + CALL_EXPR_TAILCALL (exp) = 0; - /* Generate insns to initialize the trampoline. */ - targetm.calls.trampoline_init (m_tramp, t_func, r_chain); + /* Expand the call here so we can emit trailing code. */ + ret = expand_call (exp, target, ignore); - if (onstack) - { - trampolines_created = 1; + /* Replace the original function just in case it matters. */ + TREE_OPERAND (addr, 0) = fndecl; - if (targetm.calls.custom_function_descriptors != 0) - warning_at (DECL_SOURCE_LOCATION (t_func), OPT_Wtrampolines, - "trampoline generated for nested function %qD", t_func); + /* Then issue the arithmetic correction to return the right result. */ + if (!ignore) + { + if (code == NOT) + { + ret = expand_simple_binop (mode, AND, ret, val, NULL_RTX, true, + OPTAB_LIB_WIDEN); + ret = expand_simple_unop (mode, NOT, ret, target, true); + } + else + ret = expand_simple_binop (mode, code, ret, val, target, true, + OPTAB_LIB_WIDEN); } - - return const0_rtx; + return ret; } -static rtx -expand_builtin_adjust_trampoline (tree exp) +/* Expand IFN_ATOMIC_BIT_TEST_AND_* internal function. */ + +void +expand_ifn_atomic_bit_test_and (gcall *call) { - rtx tramp; + tree ptr = gimple_call_arg (call, 0); + tree bit = gimple_call_arg (call, 1); + tree flag = gimple_call_arg (call, 2); + tree lhs = gimple_call_lhs (call); + enum memmodel model = MEMMODEL_SYNC_SEQ_CST; + machine_mode mode = TYPE_MODE (TREE_TYPE (flag)); + enum rtx_code code; + optab optab; + class expand_operand ops[5]; - if (!validate_arglist (exp, POINTER_TYPE, VOID_TYPE)) - return NULL_RTX; + gcc_assert (flag_inline_atomics); - tramp = expand_normal (CALL_EXPR_ARG (exp, 0)); - tramp = round_trampoline_addr (tramp); - if (targetm.calls.trampoline_adjust_address) - tramp = targetm.calls.trampoline_adjust_address (tramp); + if (gimple_call_num_args (call) == 4) + model = get_memmodel (gimple_call_arg (call, 3)); - return tramp; -} + rtx mem = get_builtin_sync_mem (ptr, mode); + rtx val = expand_expr_force_mode (bit, mode); -/* Expand a call to the builtin descriptor initialization routine. - A descriptor is made up of a couple of pointers to the static - chain and the code entry in this order. 
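
expand_ifn_atomic_bit_test_and, entered above, serves the combined test-and-modify idioms; its shift-and-mask fallback, completed just below, computes the same thing a user would open-code as (set_bit_and_test is an invented name):

  unsigned word;

  int set_bit_and_test (unsigned bit)
  {
    unsigned mask = 1u << bit;
    unsigned old_word = __atomic_fetch_or (&word, mask, __ATOMIC_SEQ_CST);
    return (old_word >> bit) & 1;   /* the bit's previous value */
  }
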
*/ + switch (gimple_call_internal_fn (call)) + { + case IFN_ATOMIC_BIT_TEST_AND_SET: + code = IOR; + optab = atomic_bit_test_and_set_optab; + break; + case IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT: + code = XOR; + optab = atomic_bit_test_and_complement_optab; + break; + case IFN_ATOMIC_BIT_TEST_AND_RESET: + code = AND; + optab = atomic_bit_test_and_reset_optab; + break; + default: + gcc_unreachable (); + } -static rtx -expand_builtin_init_descriptor (tree exp) -{ - tree t_descr, t_func, t_chain; - rtx m_descr, r_descr, r_func, r_chain; + if (lhs == NULL_TREE) + { + val = expand_simple_binop (mode, ASHIFT, const1_rtx, + val, NULL_RTX, true, OPTAB_DIRECT); + if (code == AND) + val = expand_simple_unop (mode, NOT, val, NULL_RTX, true); + expand_atomic_fetch_op (const0_rtx, mem, val, code, model, false); + return; + } - if (!validate_arglist (exp, POINTER_TYPE, POINTER_TYPE, POINTER_TYPE, - VOID_TYPE)) - return NULL_RTX; + rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE); + enum insn_code icode = direct_optab_handler (optab, mode); + gcc_assert (icode != CODE_FOR_nothing); + create_output_operand (&ops[0], target, mode); + create_fixed_operand (&ops[1], mem); + create_convert_operand_to (&ops[2], val, mode, true); + create_integer_operand (&ops[3], model); + create_integer_operand (&ops[4], integer_onep (flag)); + if (maybe_expand_insn (icode, 5, ops)) + return; - t_descr = CALL_EXPR_ARG (exp, 0); - t_func = CALL_EXPR_ARG (exp, 1); - t_chain = CALL_EXPR_ARG (exp, 2); + rtx bitval = val; + val = expand_simple_binop (mode, ASHIFT, const1_rtx, + val, NULL_RTX, true, OPTAB_DIRECT); + rtx maskval = val; + if (code == AND) + val = expand_simple_unop (mode, NOT, val, NULL_RTX, true); + rtx result = expand_atomic_fetch_op (gen_reg_rtx (mode), mem, val, + code, model, false); + if (integer_onep (flag)) + { + result = expand_simple_binop (mode, ASHIFTRT, result, bitval, + NULL_RTX, true, OPTAB_DIRECT); + result = expand_simple_binop (mode, AND, result, const1_rtx, target, + true, OPTAB_DIRECT); + } + else + result = expand_simple_binop (mode, AND, result, maskval, target, true, + OPTAB_DIRECT); + if (result != target) + emit_move_insn (target, result); +} - r_descr = expand_normal (t_descr); - m_descr = gen_rtx_MEM (BLKmode, r_descr); - MEM_NOTRAP_P (m_descr) = 1; - set_mem_align (m_descr, GET_MODE_ALIGNMENT (ptr_mode)); +/* Expand an atomic clear operation. + void _atomic_clear (BOOL *obj, enum memmodel) + EXP is the call expression. */ - r_func = expand_normal (t_func); - r_chain = expand_normal (t_chain); +static rtx +expand_builtin_atomic_clear (tree exp) +{ + machine_mode mode; + rtx mem, ret; + enum memmodel model; - /* Generate insns to initialize the descriptor. */ - emit_move_insn (adjust_address_nv (m_descr, ptr_mode, 0), r_chain); - emit_move_insn (adjust_address_nv (m_descr, ptr_mode, - POINTER_SIZE / BITS_PER_UNIT), r_func); + mode = int_mode_for_size (BOOL_TYPE_SIZE, 0).require (); + mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode); + model = get_memmodel (CALL_EXPR_ARG (exp, 1)); + + if (is_mm_consume (model) || is_mm_acquire (model) || is_mm_acq_rel (model)) + { + location_t loc + = expansion_point_location_if_in_system_header (input_location); + warning_at (loc, OPT_Winvalid_memory_model, + "invalid memory model for %<__atomic_store%>"); + model = MEMMODEL_SEQ_CST; + } + /* Try issuing an __atomic_store, and allow fallback to __sync_lock_release. + Failing that, a store is issued by __atomic_store. 
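When the atomic_bit_test_and_* optab is unavailable, the fallback path above recovers the tested bit from an ordinary fetch-op; in C terms, the FLAG == 1 case of bit-test-and-set computes (illustrative sketch):

  int bit_test_and_set (unsigned *p, unsigned bit)
  {
    unsigned mask = 1u << bit;                      /* ASHIFT of const1_rtx */
    unsigned old = __atomic_fetch_or (p, mask, __ATOMIC_SEQ_CST);
    return (old >> bit) & 1;                        /* ASHIFTRT, then AND */
  }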
The only way this can + fail is if the bool type is larger than a word size. Unlikely, but + handle it anyway for completeness. Assume a single threaded model since + there is no atomic support in this case, and no barriers are required. */ + ret = expand_atomic_store (mem, const0_rtx, model, true); + if (!ret) + emit_move_insn (mem, const0_rtx); return const0_rtx; } -/* Expand a call to the builtin descriptor adjustment routine. */ +/* Expand an atomic test_and_set operation. + bool _atomic_test_and_set (BOOL *obj, enum memmodel) + EXP is the call expression. */ static rtx -expand_builtin_adjust_descriptor (tree exp) +expand_builtin_atomic_test_and_set (tree exp, rtx target) { - rtx tramp; + rtx mem; + enum memmodel model; + machine_mode mode; - if (!validate_arglist (exp, POINTER_TYPE, VOID_TYPE)) - return NULL_RTX; + mode = int_mode_for_size (BOOL_TYPE_SIZE, 0).require (); + mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode); + model = get_memmodel (CALL_EXPR_ARG (exp, 1)); - tramp = expand_normal (CALL_EXPR_ARG (exp, 0)); + return expand_atomic_test_and_set (target, mem, model); +} - /* Unalign the descriptor to allow runtime identification. */ - tramp = plus_constant (ptr_mode, tramp, - targetm.calls.custom_function_descriptors); - return force_operand (tramp, NULL_RTX); -} +/* Return true if (optional) argument ARG1 of size ARG0 is always lock free on + this architecture. If ARG1 is NULL, use typical alignment for size ARG0. */ -/* Expand the call EXP to the built-in signbit, signbitf or signbitl - function. The function first checks whether the back end provides - an insn to implement signbit for the respective mode. If not, it - checks whether the floating point format of the value is such that - the sign bit can be extracted. If that is not the case, error out. - EXP is the expression that is a call to the builtin function; if - convenient, the result should be placed in TARGET. */ -static rtx -expand_builtin_signbit (tree exp, rtx target) +static tree +fold_builtin_atomic_always_lock_free (tree arg0, tree arg1) { - const struct real_format *fmt; - scalar_float_mode fmode; - scalar_int_mode rmode, imode; - tree arg; - int word, bitpos; - enum insn_code icode; - rtx temp; - location_t loc = EXPR_LOCATION (exp); - - if (!validate_arglist (exp, REAL_TYPE, VOID_TYPE)) - return NULL_RTX; + int size; + machine_mode mode; + unsigned int mode_align, type_align; - arg = CALL_EXPR_ARG (exp, 0); - fmode = SCALAR_FLOAT_TYPE_MODE (TREE_TYPE (arg)); - rmode = SCALAR_INT_TYPE_MODE (TREE_TYPE (exp)); - fmt = REAL_MODE_FORMAT (fmode); + if (TREE_CODE (arg0) != INTEGER_CST) + return NULL_TREE; - arg = builtin_save_expr (arg); + /* We need a corresponding integer mode for the access to be lock-free. */ + size = INTVAL (expand_normal (arg0)) * BITS_PER_UNIT; + if (!int_mode_for_size (size, 0).exists (&mode)) + return boolean_false_node; - /* Expand the argument yielding a RTX expression. */ - temp = expand_normal (arg); + mode_align = GET_MODE_ALIGNMENT (mode); - /* Check if the back end provides an insn that handles signbit for the - argument's mode. */ - icode = optab_handler (signbit_optab, fmode); - if (icode != CODE_FOR_nothing) + if (TREE_CODE (arg1) == INTEGER_CST) { - rtx_insn *last = get_last_insn (); - target = gen_reg_rtx (TYPE_MODE (TREE_TYPE (exp))); - if (maybe_emit_unop_insn (icode, target, temp, UNKNOWN)) - return target; - delete_insns_since (last); - } - - /* For floating point formats without a sign bit, implement signbit - as "ARG < 0.0". 
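The INTEGER_CST branch above decodes a "fake pointer" whose lowest set bit encodes an alignment claim; both argument forms look like this at the source level (sketch):

  static int probe (void)
  {
    /* Null object: judge by the typical alignment for the size.  */
    _Bool by_size = __atomic_always_lock_free (sizeof (long), 0);
    /* Fake pointer: (void *) 2 claims 2-byte alignment, which
       least_bit_hwi extracts before comparing with the mode alignment.  */
    _Bool by_align = __atomic_always_lock_free (2, (void *) 2);
    return by_size && by_align;
  }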
*/ - bitpos = fmt->signbit_ro; - if (bitpos < 0) - { - /* But we can't do this if the format supports signed zero. */ - gcc_assert (!fmt->has_signed_zero || !HONOR_SIGNED_ZEROS (fmode)); + unsigned HOST_WIDE_INT val = UINTVAL (expand_normal (arg1)); - arg = fold_build2_loc (loc, LT_EXPR, TREE_TYPE (exp), arg, - build_real (TREE_TYPE (arg), dconst0)); - return expand_expr (arg, target, VOIDmode, EXPAND_NORMAL); - } + /* Either this argument is null, or it's a fake pointer encoding + the alignment of the object. */ + val = least_bit_hwi (val); + val *= BITS_PER_UNIT; - if (GET_MODE_SIZE (fmode) <= UNITS_PER_WORD) - { - imode = int_mode_for_mode (fmode).require (); - temp = gen_lowpart (imode, temp); + if (val == 0 || mode_align < val) + type_align = mode_align; + else + type_align = val; } else { - imode = word_mode; - /* Handle targets with different FP word orders. */ - if (FLOAT_WORDS_BIG_ENDIAN) - word = (GET_MODE_BITSIZE (fmode) - bitpos) / BITS_PER_WORD; - else - word = bitpos / BITS_PER_WORD; - temp = operand_subword_force (temp, word, fmode); - bitpos = bitpos % BITS_PER_WORD; - } - - /* Force the intermediate word_mode (or narrower) result into a - register. This avoids attempting to create paradoxical SUBREGs - of floating point modes below. */ - temp = force_reg (imode, temp); + tree ttype = TREE_TYPE (arg1); - /* If the bitpos is within the "result mode" lowpart, the operation - can be implement with a single bitwise AND. Otherwise, we need - a right shift and an AND. */ + /* This function is usually invoked and folded immediately by the front + end before anything else has a chance to look at it. The pointer + parameter at this point is usually cast to a void *, so check for that + and look past the cast. */ + if (CONVERT_EXPR_P (arg1) + && POINTER_TYPE_P (ttype) + && VOID_TYPE_P (TREE_TYPE (ttype)) + && POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (arg1, 0)))) + arg1 = TREE_OPERAND (arg1, 0); - if (bitpos < GET_MODE_BITSIZE (rmode)) - { - wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (rmode)); + ttype = TREE_TYPE (arg1); + gcc_assert (POINTER_TYPE_P (ttype)); - if (GET_MODE_SIZE (imode) > GET_MODE_SIZE (rmode)) - temp = gen_lowpart (rmode, temp); - temp = expand_binop (rmode, and_optab, temp, - immed_wide_int_const (mask, rmode), - NULL_RTX, 1, OPTAB_LIB_WIDEN); - } - else - { - /* Perform a logical right shift to place the signbit in the least - significant bit, then truncate the result to the desired mode - and mask just this bit. */ - temp = expand_shift (RSHIFT_EXPR, imode, temp, bitpos, NULL_RTX, 1); - temp = gen_lowpart (rmode, temp); - temp = expand_binop (rmode, and_optab, temp, const1_rtx, - NULL_RTX, 1, OPTAB_LIB_WIDEN); + /* Get the underlying type of the object. */ + ttype = TREE_TYPE (ttype); + type_align = TYPE_ALIGN (ttype); } - return temp; + /* If the object has smaller alignment, the lock free routines cannot + be used. */ + if (type_align < mode_align) + return boolean_false_node; + + /* Check if a compare_and_swap pattern exists for the mode which represents + the required size. The pattern is not allowed to fail, so the existence + of the pattern indicates support is present. Also require that an + atomic load exists for the required size. */ + if (can_compare_and_swap_p (mode, true) && can_atomic_load_p (mode)) + return boolean_true_node; + else + return boolean_false_node; } -/* Expand fork or exec calls. TARGET is the desired target of the - call. EXP is the call. FN is the - identificator of the actual function. 
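What the signbit expander computes can be mirrored in C for IEEE double on a little-endian 64-bit target (a sketch under that assumption): reinterpret the value as an integer image and move the sign bit down to bit 0.

  #include <stdint.h>
  #include <string.h>

  int signbit_d (double x)
  {
    uint64_t image;
    memcpy (&image, &x, sizeof image);  /* the gen_lowpart/subword step */
    return (int) (image >> 63);         /* logical right shift of bitpos */
  }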
IGNORE is nonzero if the - value is to be ignored. */ +/* Return true if the parameters to call EXP represent an object which will + always generate lock free instructions. The first argument represents the + size of the object, and the second parameter is a pointer to the object + itself. If NULL is passed for the object, then the result is based on + typical alignment for an object of the specified size. Otherwise return + false. */ static rtx -expand_builtin_fork_or_exec (tree fn, tree exp, rtx target, int ignore) +expand_builtin_atomic_always_lock_free (tree exp) { - tree id, decl; - tree call; + tree size; + tree arg0 = CALL_EXPR_ARG (exp, 0); + tree arg1 = CALL_EXPR_ARG (exp, 1); - if (DECL_FUNCTION_CODE (fn) != BUILT_IN_FORK) + if (TREE_CODE (arg0) != INTEGER_CST) { - tree path = CALL_EXPR_ARG (exp, 0); - /* Detect unterminated path. */ - if (!check_read_access (exp, path)) - return NULL_RTX; - - /* Also detect unterminated first argument. */ - switch (DECL_FUNCTION_CODE (fn)) - { - case BUILT_IN_EXECL: - case BUILT_IN_EXECLE: - case BUILT_IN_EXECLP: - if (!check_read_access (exp, path)) - return NULL_RTX; - default: - break; - } + error ("non-constant argument 1 to %qs", "__atomic_always_lock_free"); + return const0_rtx; } + size = fold_builtin_atomic_always_lock_free (arg0, arg1); + if (size == boolean_true_node) + return const1_rtx; + return const0_rtx; +} - /* If we are not profiling, just call the function. */ - if (!profile_arc_flag) - return NULL_RTX; - - /* Otherwise call the wrapper. This should be equivalent for the rest of - compiler, so the code does not diverge, and the wrapper may run the - code necessary for keeping the profiling sane. */ - - switch (DECL_FUNCTION_CODE (fn)) - { - case BUILT_IN_FORK: - id = get_identifier ("__gcov_fork"); - break; - - case BUILT_IN_EXECL: - id = get_identifier ("__gcov_execl"); - break; - - case BUILT_IN_EXECV: - id = get_identifier ("__gcov_execv"); - break; +/* Return a one or zero if it can be determined that object ARG1 of size ARG + is lock free on this architecture. */ - case BUILT_IN_EXECLP: - id = get_identifier ("__gcov_execlp"); - break; +static tree +fold_builtin_atomic_is_lock_free (tree arg0, tree arg1) +{ + if (!flag_inline_atomics) + return NULL_TREE; + + /* If it isn't always lock free, don't generate a result. */ + if (fold_builtin_atomic_always_lock_free (arg0, arg1) == boolean_true_node) + return boolean_true_node; - case BUILT_IN_EXECLE: - id = get_identifier ("__gcov_execle"); - break; + return NULL_TREE; +} - case BUILT_IN_EXECVP: - id = get_identifier ("__gcov_execvp"); - break; +/* Return true if the parameters to call EXP represent an object which will + always generate lock free instructions. The first argument represents the + size of the object, and the second parameter is a pointer to the object + itself. If NULL is passed for the object, then the result is based on + typical alignment for an object of the specified size. 
Otherwise return + NULL.  */ -static rtx +expand_builtin_atomic_is_lock_free (tree exp) +{ + tree size; + tree arg0 = CALL_EXPR_ARG (exp, 0); + tree arg1 = CALL_EXPR_ARG (exp, 1); - if (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))) + { + error ("non-integer argument 1 to %qs", "__atomic_is_lock_free"); + return NULL_RTX; } - if (!flag_inline_atomics) + return NULL_RTX; + /* If the value is known at compile time, return the RTX for it. */ + size = fold_builtin_atomic_is_lock_free (arg0, arg1); + if (size == boolean_true_node) + return const1_rtx; - -/* Reconstitute a mode for a __sync intrinsic operation. Since the type of - the pointer in these functions is void*, the tree optimizers may remove - casts. The mode computed in expand_builtin isn't reliable either, due - to __sync_bool_compare_and_swap. + return NULL_RTX; +} - FCODE_DIFF should be fcode - base, where base is the FOO_1 code for the - group of builtins. This gives us log2 of the mode size. */ +/* Expand the __atomic_thread_fence intrinsic: + void __atomic_thread_fence (enum memmodel) + EXP is the CALL_EXPR. */ -static inline machine_mode -get_builtin_sync_mode (int fcode_diff) +static void +expand_builtin_atomic_thread_fence (tree exp) { - /* The size is not negotiable, so ask not to get BLKmode in return - if the target indicates that a smaller size would be better. */ - return int_mode_for_size (BITS_PER_UNIT << fcode_diff, 0).require (); + enum memmodel model = get_memmodel (CALL_EXPR_ARG (exp, 0)); + expand_mem_thread_fence (model); } -/* Expand the memory expression LOC and return the appropriate memory operand - for the builtin_sync operations. */ +/* Expand the __atomic_signal_fence intrinsic: + void __atomic_signal_fence (enum memmodel) + EXP is the CALL_EXPR. */ -static rtx -get_builtin_sync_mem (tree loc, machine_mode mode) +static void +expand_builtin_atomic_signal_fence (tree exp) { - rtx addr, mem; - int addr_space = TYPE_ADDR_SPACE (POINTER_TYPE_P (TREE_TYPE (loc)) - ? TREE_TYPE (TREE_TYPE (loc)) - : TREE_TYPE (loc)); - scalar_int_mode addr_mode = targetm.addr_space.address_mode (addr_space); - - addr = expand_expr (loc, NULL_RTX, addr_mode, EXPAND_SUM); - addr = convert_memory_address (addr_mode, addr); - - /* Note that we explicitly do not want any alias information for this - memory, so that we kill all other live memories. Otherwise we don't - satisfy the full barrier semantics of the intrinsic. */ - mem = gen_rtx_MEM (mode, addr); - - set_mem_addr_space (mem, addr_space); - - mem = validize_mem (mem); + enum memmodel model = get_memmodel (CALL_EXPR_ARG (exp, 0)); + expand_mem_signal_fence (model); +} - /* The alignment needs to be at least according to that of the mode. */ - set_mem_align (mem, MAX (GET_MODE_ALIGNMENT (mode), - get_pointer_alignment (loc))); - set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER); - MEM_VOLATILE_P (mem) = 1; +/* Expand the __sync_synchronize intrinsic. 
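The two fences expanded above differ only in scope: a thread fence orders memory against other threads, while a signal fence orders it only against a handler running in the same thread, so it constrains the compiler but normally emits no barrier instruction. Usage sketch:

  int data, ready;

  void publish (void)
  {
    data = 42;
    __atomic_thread_fence (__ATOMIC_RELEASE);  /* expand_mem_thread_fence */
    __atomic_store_n (&ready, 1, __ATOMIC_RELAXED);
  }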
*/ -static void +expand_builtin_thread_pointer (tree exp, rtx target) { - rtx val; - machine_mode old_mode; - - if (TREE_CODE (exp) == SSA_NAME - && TYPE_MODE (TREE_TYPE (exp)) != mode) + enum insn_code icode; + if (!validate_arglist (exp, VOID_TYPE)) + return const0_rtx; + icode = direct_optab_handler (get_thread_pointer_optab, Pmode); + if (icode != CODE_FOR_nothing) { - /* Undo argument promotion if possible, as combine might not - be able to do it later due to MEM_VOLATILE_P uses in the - patterns. */ - gimple *g = get_gimple_for_ssa_name (exp); - if (g && gimple_assign_cast_p (g)) - { - tree rhs = gimple_assign_rhs1 (g); - tree_code code = gimple_assign_rhs_code (g); - if (CONVERT_EXPR_CODE_P (code) - && TYPE_MODE (TREE_TYPE (rhs)) == mode - && INTEGRAL_TYPE_P (TREE_TYPE (exp)) - && INTEGRAL_TYPE_P (TREE_TYPE (rhs)) - && (TYPE_PRECISION (TREE_TYPE (exp)) - > TYPE_PRECISION (TREE_TYPE (rhs)))) - exp = rhs; - } + class expand_operand op; + /* If the target is not suitable then create a new target. */ + if (target == NULL_RTX + || !REG_P (target) + || GET_MODE (target) != Pmode) + target = gen_reg_rtx (Pmode); + create_output_operand (&op, target, Pmode); + expand_insn (icode, 1, &op); + return target; } - - val = expand_expr (exp, NULL_RTX, mode, EXPAND_NORMAL); - /* If VAL is promoted to a wider mode, convert it back to MODE. Take care - of CONST_INTs, where we know the old_mode only from the call argument. */ - - old_mode = GET_MODE (val); - if (old_mode == VOIDmode) - old_mode = TYPE_MODE (TREE_TYPE (exp)); - val = convert_modes (mode, old_mode, val, 1); - return val; + error ("%<__builtin_thread_pointer%> is not supported on this target"); + return const0_rtx; } - -/* Expand the __sync_xxx_and_fetch and __sync_fetch_and_xxx intrinsics. - EXP is the CALL_EXPR. CODE is the rtx code - that corresponds to the arithmetic or logical operation from the name; - an exception here is that NOT actually means NAND. TARGET is an optional - place for us to store the results; AFTER is true if this is the - fetch_and_xxx form. */ - -static rtx -expand_builtin_sync_operation (machine_mode mode, tree exp, - enum rtx_code code, bool after, - rtx target) +static void +expand_builtin_set_thread_pointer (tree exp) { - rtx val, mem; - location_t loc = EXPR_LOCATION (exp); - - if (code == NOT && warn_sync_nand) + enum insn_code icode; + if (!validate_arglist (exp, POINTER_TYPE, VOID_TYPE)) + return; + icode = direct_optab_handler (set_thread_pointer_optab, Pmode); + if (icode != CODE_FOR_nothing) { - tree fndecl = get_callee_fndecl (exp); - enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl); + class expand_operand op; + rtx val = expand_expr (CALL_EXPR_ARG (exp, 0), NULL_RTX, + Pmode, EXPAND_NORMAL); + create_input_operand (&op, val, Pmode); + expand_insn (icode, 1, &op); + return; + } + error ("%<__builtin_set_thread_pointer%> is not supported on this target"); +} - static bool warned_f_a_n, warned_n_a_f; + +/* Emit code to restore the current value of stack. 
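expand_builtin_thread_pointer only succeeds when the target defines get_thread_pointer_optab (for example the TLS base register on x86-64); otherwise it reports the builtin as unsupported. At the source level this is simply:

  void *tls_base (void)
  {
    return __builtin_thread_pointer ();  /* one insn when the optab exists */
  }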
*/ - switch (fcode) - { - case BUILT_IN_SYNC_FETCH_AND_NAND_1: - case BUILT_IN_SYNC_FETCH_AND_NAND_2: - case BUILT_IN_SYNC_FETCH_AND_NAND_4: - case BUILT_IN_SYNC_FETCH_AND_NAND_8: - case BUILT_IN_SYNC_FETCH_AND_NAND_16: - if (warned_f_a_n) - break; +static void +expand_stack_restore (tree var) +{ + rtx_insn *prev; + rtx sa = expand_normal (var); - fndecl = builtin_decl_implicit (BUILT_IN_SYNC_FETCH_AND_NAND_N); - inform (loc, "%qD changed semantics in GCC 4.4", fndecl); - warned_f_a_n = true; - break; + sa = convert_memory_address (Pmode, sa); - case BUILT_IN_SYNC_NAND_AND_FETCH_1: - case BUILT_IN_SYNC_NAND_AND_FETCH_2: - case BUILT_IN_SYNC_NAND_AND_FETCH_4: - case BUILT_IN_SYNC_NAND_AND_FETCH_8: - case BUILT_IN_SYNC_NAND_AND_FETCH_16: - if (warned_n_a_f) - break; + prev = get_last_insn (); + emit_stack_restore (SAVE_BLOCK, sa); - fndecl = builtin_decl_implicit (BUILT_IN_SYNC_NAND_AND_FETCH_N); - inform (loc, "%qD changed semantics in GCC 4.4", fndecl); - warned_n_a_f = true; - break; + record_new_stack_level (); - default: - gcc_unreachable (); - } - } + fixup_args_size_notes (prev, get_last_insn (), 0); +} - /* Expand the operands. */ - mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode); - val = expand_expr_force_mode (CALL_EXPR_ARG (exp, 1), mode); +/* Emit code to save the current value of stack. */ - return expand_atomic_fetch_op (target, mem, val, code, MEMMODEL_SYNC_SEQ_CST, - after); +static rtx +expand_stack_save (void) +{ + rtx ret = NULL_RTX; + + emit_stack_save (SAVE_BLOCK, &ret); + return ret; } -/* Expand the __sync_val_compare_and_swap and __sync_bool_compare_and_swap - intrinsics. EXP is the CALL_EXPR. IS_BOOL is - true if this is the boolean form. TARGET is a place for us to store the - results; this is NOT optional if IS_BOOL is true. */ +/* Emit code to get the openacc gang, worker or vector id or size. */ static rtx -expand_builtin_compare_and_swap (machine_mode mode, tree exp, - bool is_bool, rtx target) +expand_builtin_goacc_parlevel_id_size (tree exp, rtx target, int ignore) { - rtx old_val, new_val, mem; - rtx *pbool, *poval; + const char *name; + rtx fallback_retval; + rtx_insn *(*gen_fn) (rtx, rtx); + switch (DECL_FUNCTION_CODE (get_callee_fndecl (exp))) + { + case BUILT_IN_GOACC_PARLEVEL_ID: + name = "__builtin_goacc_parlevel_id"; + fallback_retval = const0_rtx; + gen_fn = targetm.gen_oacc_dim_pos; + break; + case BUILT_IN_GOACC_PARLEVEL_SIZE: + name = "__builtin_goacc_parlevel_size"; + fallback_retval = const1_rtx; + gen_fn = targetm.gen_oacc_dim_size; + break; + default: + gcc_unreachable (); + } - /* Expand the operands. 
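The stack save/restore pair expanded above is what brackets blocks containing variably sized objects, so each loop iteration below reuses the same stack space (illustrative C):

  void iterate (int n)
  {
    for (int i = 0; i < n; i++)
      {
        char buf[n];      /* stack pointer saved at block entry */
        buf[0] = (char) i;
      }                   /* ...and restored on each block exit */
  }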
*/ - mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode); - old_val = expand_expr_force_mode (CALL_EXPR_ARG (exp, 1), mode); - new_val = expand_expr_force_mode (CALL_EXPR_ARG (exp, 2), mode); + if (oacc_get_fn_attrib (current_function_decl) == NULL_TREE) + { + error ("%qs only supported in OpenACC code", name); + return const0_rtx; + } - pbool = poval = NULL; - if (target != const0_rtx) + tree arg = CALL_EXPR_ARG (exp, 0); + if (TREE_CODE (arg) != INTEGER_CST) { - if (is_bool) - pbool = ⌖ - else - poval = ⌖ + error ("non-constant argument 0 to %qs", name); + return const0_rtx; } - if (!expand_atomic_compare_and_swap (pbool, poval, mem, old_val, new_val, - false, MEMMODEL_SYNC_SEQ_CST, - MEMMODEL_SYNC_SEQ_CST)) - return NULL_RTX; - return target; -} + int dim = TREE_INT_CST_LOW (arg); + switch (dim) + { + case GOMP_DIM_GANG: + case GOMP_DIM_WORKER: + case GOMP_DIM_VECTOR: + break; + default: + error ("illegal argument 0 to %qs", name); + return const0_rtx; + } -/* Expand the __sync_lock_test_and_set intrinsic. Note that the most - general form is actually an atomic exchange, and some targets only - support a reduced form with the second argument being a constant 1. - EXP is the CALL_EXPR; TARGET is an optional place for us to store - the results. */ + if (ignore) + return target; -static rtx -expand_builtin_sync_lock_test_and_set (machine_mode mode, tree exp, - rtx target) -{ - rtx val, mem; + if (target == NULL_RTX) + target = gen_reg_rtx (TYPE_MODE (TREE_TYPE (exp))); - /* Expand the operands. */ - mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode); - val = expand_expr_force_mode (CALL_EXPR_ARG (exp, 1), mode); + if (!targetm.have_oacc_dim_size ()) + { + emit_move_insn (target, fallback_retval); + return target; + } - return expand_sync_lock_test_and_set (target, mem, val); -} - -/* Expand the __sync_lock_release intrinsic. EXP is the CALL_EXPR. */ + rtx reg = MEM_P (target) ? gen_reg_rtx (GET_MODE (target)) : target; + emit_insn (gen_fn (reg, GEN_INT (dim))); + if (reg != target) + emit_move_insn (target, reg); -static void -expand_builtin_sync_lock_release (machine_mode mode, tree exp) -{ - rtx mem; + return target; +} - /* Expand the operands. */ - mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode); +/* Expand a string compare operation using a sequence of char comparison + to get rid of the calling overhead, with result going to TARGET if + that's convenient. - expand_atomic_store (mem, const0_rtx, MEMMODEL_SYNC_RELEASE, true); -} + VAR_STR is the variable string source; + CONST_STR is the constant string source; + LENGTH is the number of chars to compare; + CONST_STR_N indicates which source string is the constant string; + IS_MEMCMP indicates whether it's a memcmp or strcmp. + + to: (assume const_str_n is 2, i.e., arg2 is a constant string) -/* Given an integer representing an ``enum memmodel'', verify its - correctness and return the memory model enum. */ + target = (int) (unsigned char) var_str[0] + - (int) (unsigned char) const_str[0]; + if (target != 0) + goto ne_label; + ... 
+ target = (int) (unsigned char) var_str[length - 2] + - (int) (unsigned char) const_str[length - 2]; + if (target != 0) + goto ne_label; + target = (int) (unsigned char) var_str[length - 1] + - (int) (unsigned char) const_str[length - 1]; + ne_label: + */ -static enum memmodel -get_memmodel (tree exp) +static rtx +inline_string_cmp (rtx target, tree var_str, const char *const_str, + unsigned HOST_WIDE_INT length, + int const_str_n, machine_mode mode) { - rtx op; - unsigned HOST_WIDE_INT val; - location_t loc - = expansion_point_location_if_in_system_header (input_location); - - /* If the parameter is not a constant, it's a run time value so we'll just - convert it to MEMMODEL_SEQ_CST to avoid annoying runtime checking. */ - if (TREE_CODE (exp) != INTEGER_CST) - return MEMMODEL_SEQ_CST; + HOST_WIDE_INT offset = 0; + rtx var_rtx_array + = get_memory_rtx (var_str, build_int_cst (unsigned_type_node, length)); + rtx var_rtx = NULL_RTX; + rtx const_rtx = NULL_RTX; + rtx result = target ? target : gen_reg_rtx (mode); + rtx_code_label *ne_label = gen_label_rtx (); + tree unit_type_node = unsigned_char_type_node; + scalar_int_mode unit_mode + = as_a <scalar_int_mode> (TYPE_MODE (unit_type_node)); - op = expand_normal (exp); + start_sequence (); - val = INTVAL (op); - if (targetm.memmodel_check) - val = targetm.memmodel_check (val); - else if (val & ~MEMMODEL_MASK) + for (unsigned HOST_WIDE_INT i = 0; i < length; i++) { - warning_at (loc, OPT_Winvalid_memory_model, - "unknown architecture specifier in memory model to builtin"); - return MEMMODEL_SEQ_CST; - } + var_rtx + = adjust_address (var_rtx_array, TYPE_MODE (unit_type_node), offset); + const_rtx = c_readstr (const_str + offset, unit_mode); + rtx op0 = (const_str_n == 1) ? const_rtx : var_rtx; + rtx op1 = (const_str_n == 1) ? var_rtx : const_rtx; - /* Should never see a user explicit SYNC memmodel, so >= LAST works. */ - if (memmodel_base (val) >= MEMMODEL_LAST) - { - warning_at (loc, OPT_Winvalid_memory_model, - "invalid memory model argument to builtin"); - return MEMMODEL_SEQ_CST; + op0 = convert_modes (mode, unit_mode, op0, 1); + op1 = convert_modes (mode, unit_mode, op1, 1); + result = expand_simple_binop (mode, MINUS, op0, op1, + result, 1, OPTAB_WIDEN); + if (i < length - 1) + emit_cmp_and_jump_insns (result, CONST0_RTX (mode), NE, NULL_RTX, + mode, true, ne_label); + offset += GET_MODE_SIZE (unit_mode); } - /* Workaround for Bugzilla 59448. GCC doesn't track consume properly, so - be conservative and promote consume to acquire. */ - if (val == MEMMODEL_CONSUME) - val = MEMMODEL_ACQUIRE; + emit_label (ne_label); + rtx_insn *insns = get_insns (); + end_sequence (); + emit_insn (insns); - return (enum memmodel) val; + return result; } -/* Expand the __atomic_exchange intrinsic: - TYPE __atomic_exchange (TYPE *object, TYPE desired, enum memmodel) - EXP is the CALL_EXPR. - TARGET is an optional place for us to store the results. */ +/* Inline expansion of a call to str(n)cmp and memcmp, with result going + to TARGET if that's convenient. + If the call has not been inlined, return NULL_RTX. 
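For a concrete picture of inline_string_cmp, the sequence it emits for strcmp (s, "ab") with const_str_n == 2 and length == 3 corresponds to this C (sketch; the final compare has no jump, exactly as in the comment above):

  int strcmp_s_ab (const char *s)
  {
    int r = (int) (unsigned char) s[0] - (int) (unsigned char) 'a';
    if (r != 0) goto ne;
    r = (int) (unsigned char) s[1] - (int) (unsigned char) 'b';
    if (r != 0) goto ne;
    r = (int) (unsigned char) s[2] - (int) (unsigned char) '\0';
  ne:
    return r;
  }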
*/ static rtx -expand_builtin_atomic_exchange (machine_mode mode, tree exp, rtx target) +inline_expand_builtin_bytecmp (tree exp, rtx target) { - rtx val, mem; - enum memmodel model; - - model = get_memmodel (CALL_EXPR_ARG (exp, 2)); + tree fndecl = get_callee_fndecl (exp); + enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl); + bool is_ncmp = (fcode == BUILT_IN_STRNCMP || fcode == BUILT_IN_MEMCMP); - if (!flag_inline_atomics) + /* Do NOT apply this inlining expansion when optimizing for size or + optimization level below 2. */ + if (optimize < 2 || optimize_insn_for_size_p ()) return NULL_RTX; - /* Expand the operands. */ - mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode); - val = expand_expr_force_mode (CALL_EXPR_ARG (exp, 1), mode); + gcc_checking_assert (fcode == BUILT_IN_STRCMP + || fcode == BUILT_IN_STRNCMP + || fcode == BUILT_IN_MEMCMP); - return expand_atomic_exchange (target, mem, val, model); -} + /* On a target where the type of the call (int) has the same or narrower precision + than unsigned char, give up the inlining expansion. */ + if (TYPE_PRECISION (unsigned_char_type_node) + >= TYPE_PRECISION (TREE_TYPE (exp))) + return NULL_RTX; -/* Expand the __atomic_compare_exchange intrinsic: - bool __atomic_compare_exchange (TYPE *object, TYPE *expect, - TYPE desired, BOOL weak, - enum memmodel success, - enum memmodel failure) - EXP is the CALL_EXPR. - TARGET is an optional place for us to store the results. */ + tree arg1 = CALL_EXPR_ARG (exp, 0); + tree arg2 = CALL_EXPR_ARG (exp, 1); + tree len3_tree = is_ncmp ? CALL_EXPR_ARG (exp, 2) : NULL_TREE; -static rtx -expand_builtin_atomic_compare_exchange (machine_mode mode, tree exp, - rtx target) -{ - rtx expect, desired, mem, oldval; - rtx_code_label *label; - enum memmodel success, failure; - tree weak; - bool is_weak; - location_t loc - = expansion_point_location_if_in_system_header (input_location); + unsigned HOST_WIDE_INT len1 = 0; + unsigned HOST_WIDE_INT len2 = 0; + unsigned HOST_WIDE_INT len3 = 0; - success = get_memmodel (CALL_EXPR_ARG (exp, 4)); - failure = get_memmodel (CALL_EXPR_ARG (exp, 5)); + /* Get the object representation of the initializers of ARG1 and ARG2 + as strings, provided they refer to constant objects, with their byte + sizes in LEN1 and LEN2, respectively. */ + const char *bytes1 = getbyterep (arg1, &len1); + const char *bytes2 = getbyterep (arg2, &len2); - if (failure > success) + /* Fail if neither argument refers to an initialized constant. */ + if (!bytes1 && !bytes2) + return NULL_RTX; + + if (is_ncmp) { - warning_at (loc, OPT_Winvalid_memory_model, - "failure memory model cannot be stronger than success " - "memory model for %<__atomic_compare_exchange%>"); - success = MEMMODEL_SEQ_CST; + /* Fail if the memcmp/strncmp bound is not a constant. */ + if (!tree_fits_uhwi_p (len3_tree)) + return NULL_RTX; + + len3 = tree_to_uhwi (len3_tree); + + if (fcode == BUILT_IN_MEMCMP) + { + /* Fail if the memcmp bound is greater than the size of either + of the two constant objects. */ + if ((bytes1 && len1 < len3) + || (bytes2 && len2 < len3)) + return NULL_RTX; + } } - - if (is_mm_release (failure) || is_mm_acq_rel (failure)) + + if (fcode != BUILT_IN_MEMCMP) { - warning_at (loc, OPT_Winvalid_memory_model, - "invalid failure memory model for " - "%<__atomic_compare_exchange%>"); - failure = MEMMODEL_SEQ_CST; - success = MEMMODEL_SEQ_CST; + /* For string functions (i.e., strcmp and strncmp) reduce LEN1 + and LEN2 to the length of the nul-terminated string stored + in each. 
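Under the gates above (at -O2, not optimizing for size, with at least one constant argument), a call such as the following qualifies: the constant string contributes a length of 3 including the nul, so the effective bound min (3, 5) = 3 stays within the threshold (sketch; the default --param builtin-string-cmp-inline-length=3 is an assumption of the current sources):

  int cmp_ab (const char *s)
  {
    return __builtin_strncmp (s, "ab", 5);  /* expanded inline per above */
  }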
*/ + if (bytes1 != NULL) + len1 = strnlen (bytes1, len1) + 1; + if (bytes2 != NULL) + len2 = strnlen (bytes2, len2) + 1; } - if (!flag_inline_atomics) - return NULL_RTX; + /* See inline_string_cmp. */ + int const_str_n; + if (!len1) + const_str_n = 2; + else if (!len2) + const_str_n = 1; + else if (len2 > len1) + const_str_n = 1; + else + const_str_n = 2; - /* Expand the operands. */ - mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode); + /* For strncmp only, compute the new bound as the smallest of + the lengths of the two strings (plus 1) and the bound provided + to the function. */ + unsigned HOST_WIDE_INT bound = (const_str_n == 1) ? len1 : len2; + if (is_ncmp && len3 < bound) + bound = len3; - expect = expand_normal (CALL_EXPR_ARG (exp, 1)); - expect = convert_memory_address (Pmode, expect); - expect = gen_rtx_MEM (mode, expect); - desired = expand_expr_force_mode (CALL_EXPR_ARG (exp, 2), mode); + /* If the bound of the comparison is larger than the threshold, + do nothing. */ + if (bound > (unsigned HOST_WIDE_INT) param_builtin_string_cmp_inline_length) + return NULL_RTX; - weak = CALL_EXPR_ARG (exp, 3); - is_weak = false; - if (tree_fits_shwi_p (weak) && tree_to_shwi (weak) != 0) - is_weak = true; + machine_mode mode = TYPE_MODE (TREE_TYPE (exp)); - if (target == const0_rtx) - target = NULL; + /* Now, start inline expansion of the call. */ + return inline_string_cmp (target, (const_str_n == 1) ? arg2 : arg1, + (const_str_n == 1) ? bytes1 : bytes2, bound, + const_str_n, mode); +} - /* Lest the rtl backend create a race condition with an improper store - to memory, always create a new pseudo for OLDVAL. */ - oldval = NULL; - - if (!expand_atomic_compare_and_swap (&target, &oldval, mem, expect, desired, - is_weak, success, failure)) - return NULL_RTX; - - /* Conditionally store back to EXPECT, lest we create a race condition - with an improper store to memory. */ - /* ??? With a rearrangement of atomics at the gimple level, we can handle - the normal case where EXPECT is totally private, i.e. a register. At - which point the store can be unconditional. */ - label = gen_label_rtx (); - emit_cmp_and_jump_insns (target, const0_rtx, NE, NULL, - GET_MODE (target), 1, label); - emit_move_insn (expect, oldval); - emit_label (label); - - return target; -} -/* Helper function for expand_ifn_atomic_compare_exchange - expand - internal ATOMIC_COMPARE_EXCHANGE call into __atomic_compare_exchange_N - call. The weak parameter must be dropped to match the expected parameter - list and the expected argument changed from value to pointer to memory - slot. */ -static void -expand_ifn_atomic_compare_exchange_into_call (gcall *call, machine_mode mode) -{ - unsigned int z; - vec<tree, va_gc> *vec; - vec_alloc (vec, 5); - vec->quick_push (gimple_call_arg (call, 0)); - tree expected = gimple_call_arg (call, 1); - rtx x = assign_stack_temp_for_type (mode, GET_MODE_SIZE (mode), - TREE_TYPE (expected)); - rtx expd = expand_expr (expected, x, mode, EXPAND_NORMAL); - if (expd != x) - emit_move_insn (x, expd); - tree v = make_tree (TREE_TYPE (expected), x); - vec->quick_push (build1 (ADDR_EXPR, - build_pointer_type (TREE_TYPE (expected)), v)); - vec->quick_push (gimple_call_arg (call, 2)); - /* Skip the boolean weak parameter. */ - for (z = 4; z < 6; z++) - vec->quick_push (gimple_call_arg (call, z)); - /* At present we only have BUILT_IN_ATOMIC_COMPARE_EXCHANGE_{1,2,4,8,16}. 
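For reference, the library routine that expand_ifn_atomic_compare_exchange_into_call rewrites to takes five arguments for a 4-byte access: no WEAK parameter, and EXPECTED passed by address through the stack temporary created above. An assumed prototype, shown under a stand-in name so it cannot collide with the GCC builtin of the same name:

  /* Shape of libatomic's __atomic_compare_exchange_4 (assumed).  */
  extern _Bool standin_compare_exchange_4 (volatile void *mem,
                                           void *expected, unsigned desired,
                                           int success, int failure);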
*/ - unsigned int bytes_log2 = exact_log2 (GET_MODE_SIZE (mode).to_constant ()); - gcc_assert (bytes_log2 < 5); - built_in_function fncode - = (built_in_function) ((int) BUILT_IN_ATOMIC_COMPARE_EXCHANGE_1 - + bytes_log2); - tree fndecl = builtin_decl_explicit (fncode); - tree fn = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (fndecl)), - fndecl); - tree exp = build_call_vec (boolean_type_node, fn, vec); - tree lhs = gimple_call_lhs (call); - rtx boolret = expand_call (exp, NULL_RTX, lhs == NULL_TREE); - if (lhs) - { - rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE); - if (GET_MODE (boolret) != mode) - boolret = convert_modes (mode, GET_MODE (boolret), boolret, 1); - x = force_reg (mode, x); - write_complex_part (target, boolret, true); - write_complex_part (target, x, false); - } -} - -/* Expand IFN_ATOMIC_COMPARE_EXCHANGE internal function. */ - -void -expand_ifn_atomic_compare_exchange (gcall *call) +/* Expand a call to __builtin_speculation_safe_value_. MODE + represents the size of the first argument to that call, or VOIDmode + if the argument is a pointer. IGNORE will be true if the result + isn't used. */ +static rtx +expand_speculation_safe_value (machine_mode mode, tree exp, rtx target, + bool ignore) { - int size = tree_to_shwi (gimple_call_arg (call, 3)) & 255; - gcc_assert (size == 1 || size == 2 || size == 4 || size == 8 || size == 16); - machine_mode mode = int_mode_for_size (BITS_PER_UNIT * size, 0).require (); - rtx expect, desired, mem, oldval, boolret; - enum memmodel success, failure; - tree lhs; - bool is_weak; - location_t loc - = expansion_point_location_if_in_system_header (gimple_location (call)); + rtx val, failsafe; + unsigned nargs = call_expr_nargs (exp); - success = get_memmodel (gimple_call_arg (call, 4)); - failure = get_memmodel (gimple_call_arg (call, 5)); + tree arg0 = CALL_EXPR_ARG (exp, 0); - if (failure > success) + if (mode == VOIDmode) { - warning_at (loc, OPT_Winvalid_memory_model, - "failure memory model cannot be stronger than success " - "memory model for %<__atomic_compare_exchange%>"); - success = MEMMODEL_SEQ_CST; + mode = TYPE_MODE (TREE_TYPE (arg0)); + gcc_assert (GET_MODE_CLASS (mode) == MODE_INT); } - if (is_mm_release (failure) || is_mm_acq_rel (failure)) - { - warning_at (loc, OPT_Winvalid_memory_model, - "invalid failure memory model for " - "%<__atomic_compare_exchange%>"); - failure = MEMMODEL_SEQ_CST; - success = MEMMODEL_SEQ_CST; - } + val = expand_expr (arg0, NULL_RTX, mode, EXPAND_NORMAL); - if (!flag_inline_atomics) + /* An optional second argument can be used as a failsafe value on + some machines. If it isn't present, then the failsafe value is + assumed to be 0. */ + if (nargs > 1) { - expand_ifn_atomic_compare_exchange_into_call (call, mode); - return; + tree arg1 = CALL_EXPR_ARG (exp, 1); + failsafe = expand_expr (arg1, NULL_RTX, mode, EXPAND_NORMAL); } + else + failsafe = const0_rtx; - /* Expand the operands. */ - mem = get_builtin_sync_mem (gimple_call_arg (call, 0), mode); - - expect = expand_expr_force_mode (gimple_call_arg (call, 1), mode); - desired = expand_expr_force_mode (gimple_call_arg (call, 2), mode); - - is_weak = (tree_to_shwi (gimple_call_arg (call, 3)) & 256) != 0; + /* If the result isn't used, the behavior is undefined. It would be + nice to emit a warning here, but path splitting means this might + happen with legitimate code. So simply drop the builtin + expansion in that case; we've handled any side-effects above. 
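expand_ifn_atomic_compare_exchange unpacks its fourth argument into an access size and a weak flag; the decoding used above, written out (sketch):

  static void decode_arg3 (int arg3, int *size, _Bool *is_weak)
  {
    *size = arg3 & 255;            /* low byte: 1, 2, 4, 8 or 16 */
    *is_weak = (arg3 & 256) != 0;  /* bit 8 carries the WEAK flag */
  }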
*/ + if (ignore) + return const0_rtx; - boolret = NULL; - oldval = NULL; + /* If we don't have a suitable target, create one to hold the result. */ + if (target == NULL || GET_MODE (target) != mode) + target = gen_reg_rtx (mode); - if (!expand_atomic_compare_and_swap (&boolret, &oldval, mem, expect, desired, - is_weak, success, failure)) - { - expand_ifn_atomic_compare_exchange_into_call (call, mode); - return; - } + if (GET_MODE (val) != mode && GET_MODE (val) != VOIDmode) + val = convert_modes (mode, VOIDmode, val, false); - lhs = gimple_call_lhs (call); - if (lhs) - { - rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE); - if (GET_MODE (boolret) != mode) - boolret = convert_modes (mode, GET_MODE (boolret), boolret, 1); - write_complex_part (target, boolret, true); - write_complex_part (target, oldval, false); - } + return targetm.speculation_safe_value (mode, target, val, failsafe); } -/* Expand the __atomic_load intrinsic: - TYPE __atomic_load (TYPE *object, enum memmodel) - EXP is the CALL_EXPR. - TARGET is an optional place for us to store the results. */ +/* Expand an expression EXP that calls a built-in function, + with result going to TARGET if that's convenient + (and in mode MODE if that's convenient). + SUBTARGET may be used as the target for computing one of EXP's operands. + IGNORE is nonzero if the value is to be ignored. */ -static rtx -expand_builtin_atomic_load (machine_mode mode, tree exp, rtx target) +rtx +expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode, + int ignore) { - rtx mem; - enum memmodel model; - - model = get_memmodel (CALL_EXPR_ARG (exp, 1)); - if (is_mm_release (model) || is_mm_acq_rel (model)) - { - location_t loc - = expansion_point_location_if_in_system_header (input_location); - warning_at (loc, OPT_Winvalid_memory_model, - "invalid memory model for %<__atomic_load%>"); - model = MEMMODEL_SEQ_CST; - } - - if (!flag_inline_atomics) - return NULL_RTX; + tree fndecl = get_callee_fndecl (exp); + machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp)); + int flags; - /* Expand the operand. */ - mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode); + if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD) + return targetm.expand_builtin (exp, target, subtarget, mode, ignore); - return expand_atomic_load (target, mem, model); -} + /* When ASan is enabled, we don't want to expand some memory/string + builtins and rely on libsanitizer's hooks. This allows us to avoid + redundant checks and be sure, that possible overflow will be detected + by ASan. */ + enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl); + if ((flag_sanitize & SANITIZE_ADDRESS) && asan_intercepted_p (fcode)) + return expand_call (exp, target, ignore); -/* Expand the __atomic_store intrinsic: - void __atomic_store (TYPE *object, TYPE desired, enum memmodel) - EXP is the CALL_EXPR. - TARGET is an optional place for us to store the results. */ + /* When not optimizing, generate calls to library functions for a certain + set of builtins. 
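A typical use of the builtin handled by expand_speculation_safe_value: clamping an index that might be consumed under misspeculation, with 0 as the failsafe (sketch):

  int load_guarded (int *a, unsigned n, unsigned i)
  {
    if (i < n)
      /* If the bounds check is bypassed speculatively, the target hook
         forces the failsafe 0 instead of the out-of-range index.  */
      return a[__builtin_speculation_safe_value (i, 0)];
    return 0;
  }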
*/ + if (!optimize + && !called_as_built_in (fndecl) + && fcode != BUILT_IN_FORK + && fcode != BUILT_IN_EXECL + && fcode != BUILT_IN_EXECV + && fcode != BUILT_IN_EXECLP + && fcode != BUILT_IN_EXECLE + && fcode != BUILT_IN_EXECVP + && fcode != BUILT_IN_EXECVE + && fcode != BUILT_IN_CLEAR_CACHE + && !ALLOCA_FUNCTION_CODE_P (fcode) + && fcode != BUILT_IN_FREE) + return expand_call (exp, target, ignore); -static rtx -expand_builtin_atomic_store (machine_mode mode, tree exp) -{ - rtx mem, val; - enum memmodel model; + /* The built-in function expanders test for target == const0_rtx + to determine whether the function's result will be ignored. */ + if (ignore) + target = const0_rtx; - model = get_memmodel (CALL_EXPR_ARG (exp, 2)); - if (!(is_mm_relaxed (model) || is_mm_seq_cst (model) - || is_mm_release (model))) + /* If the result of a pure or const built-in function is ignored, and + none of its arguments are volatile, we can avoid expanding the + built-in call and just evaluate the arguments for side-effects. */ + if (target == const0_rtx + && ((flags = flags_from_decl_or_type (fndecl)) & (ECF_CONST | ECF_PURE)) + && !(flags & ECF_LOOPING_CONST_OR_PURE)) { - location_t loc - = expansion_point_location_if_in_system_header (input_location); - warning_at (loc, OPT_Winvalid_memory_model, - "invalid memory model for %<__atomic_store%>"); - model = MEMMODEL_SEQ_CST; - } - - if (!flag_inline_atomics) - return NULL_RTX; - - /* Expand the operands. */ - mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode); - val = expand_expr_force_mode (CALL_EXPR_ARG (exp, 1), mode); + bool volatilep = false; + tree arg; + call_expr_arg_iterator iter; - return expand_atomic_store (mem, val, model, false); -} + FOR_EACH_CALL_EXPR_ARG (arg, iter, exp) + if (TREE_THIS_VOLATILE (arg)) + { + volatilep = true; + break; + } -/* Expand the __atomic_fetch_XXX intrinsic: - TYPE __atomic_fetch_XXX (TYPE *object, TYPE val, enum memmodel) - EXP is the CALL_EXPR. - TARGET is an optional place for us to store the results. - CODE is the operation, PLUS, MINUS, ADD, XOR, or IOR. - FETCH_AFTER is true if returning the result of the operation. - FETCH_AFTER is false if returning the value before the operation. - IGNORE is true if the result is not used. - EXT_CALL is the correct builtin for an external call if this cannot be - resolved to an instruction sequence. */ + if (! volatilep) + { + FOR_EACH_CALL_EXPR_ARG (arg, iter, exp) + expand_expr (arg, const0_rtx, VOIDmode, EXPAND_NORMAL); + return const0_rtx; + } + } -static rtx -expand_builtin_atomic_fetch_op (machine_mode mode, tree exp, rtx target, - enum rtx_code code, bool fetch_after, - bool ignore, enum built_in_function ext_call) -{ - rtx val, mem, ret; - enum memmodel model; - tree fndecl; - tree addr; + switch (fcode) + { + CASE_FLT_FN (BUILT_IN_FABS): + CASE_FLT_FN_FLOATN_NX (BUILT_IN_FABS): + case BUILT_IN_FABSD32: + case BUILT_IN_FABSD64: + case BUILT_IN_FABSD128: + target = expand_builtin_fabs (exp, target, subtarget); + if (target) + return target; + break; - model = get_memmodel (CALL_EXPR_ARG (exp, 2)); + CASE_FLT_FN (BUILT_IN_COPYSIGN): + CASE_FLT_FN_FLOATN_NX (BUILT_IN_COPYSIGN): + target = expand_builtin_copysign (exp, target, subtarget); + if (target) + return target; + break; - /* Expand the operands. */ - mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode); - val = expand_expr_force_mode (CALL_EXPR_ARG (exp, 1), mode); + /* Just do a normal library call if we were unable to fold + the values. 
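The const/pure early-out above drops the call itself but still evaluates the arguments, so their side effects survive (illustrative C):

  extern char *s;
  extern int x;

  void g (void)
  {
    /* strlen is pure and the result is unused: the call is elided,
       yet the argument is evaluated and x is still incremented.  */
    __builtin_strlen (s + x++);
  }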
*/ + CASE_FLT_FN (BUILT_IN_CABS): + break; - /* Only try generating instructions if inlining is turned on. */ - if (flag_inline_atomics) - { - ret = expand_atomic_fetch_op (target, mem, val, code, model, fetch_after); - if (ret) - return ret; - } + CASE_FLT_FN (BUILT_IN_FMA): + CASE_FLT_FN_FLOATN_NX (BUILT_IN_FMA): + target = expand_builtin_mathfn_ternary (exp, target, subtarget); + if (target) + return target; + break; - /* Return if a different routine isn't needed for the library call. */ - if (ext_call == BUILT_IN_NONE) - return NULL_RTX; + CASE_FLT_FN (BUILT_IN_ILOGB): + if (! flag_unsafe_math_optimizations) + break; + gcc_fallthrough (); + CASE_FLT_FN (BUILT_IN_ISINF): + CASE_FLT_FN (BUILT_IN_FINITE): + case BUILT_IN_ISFINITE: + case BUILT_IN_ISNORMAL: + target = expand_builtin_interclass_mathfn (exp, target); + if (target) + return target; + break; - /* Change the call to the specified function. */ - fndecl = get_callee_fndecl (exp); - addr = CALL_EXPR_FN (exp); - STRIP_NOPS (addr); + CASE_FLT_FN (BUILT_IN_ICEIL): + CASE_FLT_FN (BUILT_IN_LCEIL): + CASE_FLT_FN (BUILT_IN_LLCEIL): + CASE_FLT_FN (BUILT_IN_LFLOOR): + CASE_FLT_FN (BUILT_IN_IFLOOR): + CASE_FLT_FN (BUILT_IN_LLFLOOR): + target = expand_builtin_int_roundingfn (exp, target); + if (target) + return target; + break; - gcc_assert (TREE_OPERAND (addr, 0) == fndecl); - TREE_OPERAND (addr, 0) = builtin_decl_explicit (ext_call); + CASE_FLT_FN (BUILT_IN_IRINT): + CASE_FLT_FN (BUILT_IN_LRINT): + CASE_FLT_FN (BUILT_IN_LLRINT): + CASE_FLT_FN (BUILT_IN_IROUND): + CASE_FLT_FN (BUILT_IN_LROUND): + CASE_FLT_FN (BUILT_IN_LLROUND): + target = expand_builtin_int_roundingfn_2 (exp, target); + if (target) + return target; + break; - /* If we will emit code after the call, the call cannot be a tail call. - If it is emitted as a tail call, a barrier is emitted after it, and - then all trailing code is removed. */ - if (!ignore) - CALL_EXPR_TAILCALL (exp) = 0; + CASE_FLT_FN (BUILT_IN_POWI): + target = expand_builtin_powi (exp, target); + if (target) + return target; + break; - /* Expand the call here so we can emit trailing code. */ - ret = expand_call (exp, target, ignore); + CASE_FLT_FN (BUILT_IN_CEXPI): + target = expand_builtin_cexpi (exp, target); + gcc_assert (target); + return target; - /* Replace the original function just in case it matters. */ - TREE_OPERAND (addr, 0) = fndecl; + CASE_FLT_FN (BUILT_IN_SIN): + CASE_FLT_FN (BUILT_IN_COS): + if (! flag_unsafe_math_optimizations) + break; + target = expand_builtin_mathfn_3 (exp, target, subtarget); + if (target) + return target; + break; - /* Then issue the arithmetic correction to return the right result. */ - if (!ignore) - { - if (code == NOT) - { - ret = expand_simple_binop (mode, AND, ret, val, NULL_RTX, true, - OPTAB_LIB_WIDEN); - ret = expand_simple_unop (mode, NOT, ret, target, true); - } + CASE_FLT_FN (BUILT_IN_SINCOS): + if (! flag_unsafe_math_optimizations) + break; + target = expand_builtin_sincos (exp); + if (target) + return target; + break; + + case BUILT_IN_APPLY_ARGS: + return expand_builtin_apply_args (); + + /* __builtin_apply (FUNCTION, ARGUMENTS, ARGSIZE) invokes + FUNCTION with a copy of the parameters described by + ARGUMENTS, and ARGSIZE. It returns a block of memory + allocated on the stack into which is stored all the registers + that might possibly be used for returning the result of a + function. ARGUMENTS is the value returned by + __builtin_apply_args. ARGSIZE is the number of bytes of + arguments that must be copied. ??? How should this value be + computed? 
We'll also need a safe worst case value for varargs + functions. */ + case BUILT_IN_APPLY: + if (!validate_arglist (exp, POINTER_TYPE, + POINTER_TYPE, INTEGER_TYPE, VOID_TYPE) + && !validate_arglist (exp, REFERENCE_TYPE, + POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) + return const0_rtx; else - ret = expand_simple_binop (mode, code, ret, val, target, true, - OPTAB_LIB_WIDEN); - } - return ret; -} + { + rtx ops[3]; -/* Expand IFN_ATOMIC_BIT_TEST_AND_* internal function. */ + ops[0] = expand_normal (CALL_EXPR_ARG (exp, 0)); + ops[1] = expand_normal (CALL_EXPR_ARG (exp, 1)); + ops[2] = expand_normal (CALL_EXPR_ARG (exp, 2)); -void -expand_ifn_atomic_bit_test_and (gcall *call) -{ - tree ptr = gimple_call_arg (call, 0); - tree bit = gimple_call_arg (call, 1); - tree flag = gimple_call_arg (call, 2); - tree lhs = gimple_call_lhs (call); - enum memmodel model = MEMMODEL_SYNC_SEQ_CST; - machine_mode mode = TYPE_MODE (TREE_TYPE (flag)); - enum rtx_code code; - optab optab; - class expand_operand ops[5]; + return expand_builtin_apply (ops[0], ops[1], ops[2]); + } - gcc_assert (flag_inline_atomics); + /* __builtin_return (RESULT) causes the function to return the + value described by RESULT. RESULT is address of the block of + memory returned by __builtin_apply. */ + case BUILT_IN_RETURN: + if (validate_arglist (exp, POINTER_TYPE, VOID_TYPE)) + expand_builtin_return (expand_normal (CALL_EXPR_ARG (exp, 0))); + return const0_rtx; - if (gimple_call_num_args (call) == 4) - model = get_memmodel (gimple_call_arg (call, 3)); + case BUILT_IN_SAVEREGS: + return expand_builtin_saveregs (); - rtx mem = get_builtin_sync_mem (ptr, mode); - rtx val = expand_expr_force_mode (bit, mode); + case BUILT_IN_VA_ARG_PACK: + /* All valid uses of __builtin_va_arg_pack () are removed during + inlining. */ + error ("invalid use of %<__builtin_va_arg_pack ()%>"); + return const0_rtx; - switch (gimple_call_internal_fn (call)) - { - case IFN_ATOMIC_BIT_TEST_AND_SET: - code = IOR; - optab = atomic_bit_test_and_set_optab; - break; - case IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT: - code = XOR; - optab = atomic_bit_test_and_complement_optab; - break; - case IFN_ATOMIC_BIT_TEST_AND_RESET: - code = AND; - optab = atomic_bit_test_and_reset_optab; - break; - default: - gcc_unreachable (); - } + case BUILT_IN_VA_ARG_PACK_LEN: + /* All valid uses of __builtin_va_arg_pack_len () are removed during + inlining. */ + error ("invalid use of %<__builtin_va_arg_pack_len ()%>"); + return const0_rtx; - if (lhs == NULL_TREE) - { - val = expand_simple_binop (mode, ASHIFT, const1_rtx, - val, NULL_RTX, true, OPTAB_DIRECT); - if (code == AND) - val = expand_simple_unop (mode, NOT, val, NULL_RTX, true); - expand_atomic_fetch_op (const0_rtx, mem, val, code, model, false); - return; - } + /* Return the address of the first anonymous stack arg. 
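The BUILT_IN_APPLY/BUILT_IN_RETURN pair supports the classic forwarding idiom from the GCC manual; a sketch, where the 16-byte argument size is an assumption for two doubles, per the "??? How should this value be computed" caveat above:

  double target_fn (double a, double b);

  double forward (double a, double b)
  {
    void *args = __builtin_apply_args ();
    void *ret = __builtin_apply ((void (*)()) target_fn, args, 16);
    __builtin_return (ret);
  }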
*/ + case BUILT_IN_NEXT_ARG: + if (fold_builtin_next_arg (exp, false)) + return const0_rtx; + return expand_builtin_next_arg (); - rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE); - enum insn_code icode = direct_optab_handler (optab, mode); - gcc_assert (icode != CODE_FOR_nothing); - create_output_operand (&ops[0], target, mode); - create_fixed_operand (&ops[1], mem); - create_convert_operand_to (&ops[2], val, mode, true); - create_integer_operand (&ops[3], model); - create_integer_operand (&ops[4], integer_onep (flag)); - if (maybe_expand_insn (icode, 5, ops)) - return; + case BUILT_IN_CLEAR_CACHE: + expand_builtin___clear_cache (exp); + return const0_rtx; - rtx bitval = val; - val = expand_simple_binop (mode, ASHIFT, const1_rtx, - val, NULL_RTX, true, OPTAB_DIRECT); - rtx maskval = val; - if (code == AND) - val = expand_simple_unop (mode, NOT, val, NULL_RTX, true); - rtx result = expand_atomic_fetch_op (gen_reg_rtx (mode), mem, val, - code, model, false); - if (integer_onep (flag)) - { - result = expand_simple_binop (mode, ASHIFTRT, result, bitval, - NULL_RTX, true, OPTAB_DIRECT); - result = expand_simple_binop (mode, AND, result, const1_rtx, target, - true, OPTAB_DIRECT); - } - else - result = expand_simple_binop (mode, AND, result, maskval, target, true, - OPTAB_DIRECT); - if (result != target) - emit_move_insn (target, result); -} + case BUILT_IN_CLASSIFY_TYPE: + return expand_builtin_classify_type (exp); -/* Expand an atomic clear operation. - void _atomic_clear (BOOL *obj, enum memmodel) - EXP is the call expression. */ + case BUILT_IN_CONSTANT_P: + return const0_rtx; -static rtx -expand_builtin_atomic_clear (tree exp) -{ - machine_mode mode; - rtx mem, ret; - enum memmodel model; + case BUILT_IN_FRAME_ADDRESS: + case BUILT_IN_RETURN_ADDRESS: + return expand_builtin_frame_address (fndecl, exp); - mode = int_mode_for_size (BOOL_TYPE_SIZE, 0).require (); - mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode); - model = get_memmodel (CALL_EXPR_ARG (exp, 1)); + /* Returns the address of the area where the structure is returned. + 0 otherwise. */ + case BUILT_IN_AGGREGATE_INCOMING_ADDRESS: + if (call_expr_nargs (exp) != 0 + || ! AGGREGATE_TYPE_P (TREE_TYPE (TREE_TYPE (current_function_decl))) + || !MEM_P (DECL_RTL (DECL_RESULT (current_function_decl)))) + return const0_rtx; + else + return XEXP (DECL_RTL (DECL_RESULT (current_function_decl)), 0); - if (is_mm_consume (model) || is_mm_acquire (model) || is_mm_acq_rel (model)) - { - location_t loc - = expansion_point_location_if_in_system_header (input_location); - warning_at (loc, OPT_Winvalid_memory_model, - "invalid memory model for %<__atomic_store%>"); - model = MEMMODEL_SEQ_CST; - } + CASE_BUILT_IN_ALLOCA: + target = expand_builtin_alloca (exp); + if (target) + return target; + break; - /* Try issuing an __atomic_store, and allow fallback to __sync_lock_release. - Failing that, a store is issued by __atomic_store. The only way this can - fail is if the bool type is larger than a word size. Unlikely, but - handle it anyway for completeness. Assume a single threaded model since - there is no atomic support in this case, and no barriers are required. */ - ret = expand_atomic_store (mem, const0_rtx, model, true); - if (!ret) - emit_move_insn (mem, const0_rtx); - return const0_rtx; -} + case BUILT_IN_ASAN_ALLOCAS_UNPOISON: + return expand_asan_emit_allocas_unpoison (exp); -/* Expand an atomic test_and_set operation. - bool _atomic_test_and_set (BOOL *obj, enum memmodel) - EXP is the call expression. 
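The BUILT_IN_CONSTANT_P case above returns const0_rtx: any __builtin_constant_p still unresolved at RTL expansion answers "not constant". That is what makes guard macros like this safe; the fast path simply vanishes unless folding proved the condition earlier:

  #define HALVE(x, y) \
    (__builtin_constant_p (y) && (y) == 2 ? (x) >> 1 : (x) / (y))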
*/ + case BUILT_IN_STACK_SAVE: + return expand_stack_save (); -static rtx -expand_builtin_atomic_test_and_set (tree exp, rtx target) -{ - rtx mem; - enum memmodel model; - machine_mode mode; + case BUILT_IN_STACK_RESTORE: + expand_stack_restore (CALL_EXPR_ARG (exp, 0)); + return const0_rtx; - mode = int_mode_for_size (BOOL_TYPE_SIZE, 0).require (); - mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode); - model = get_memmodel (CALL_EXPR_ARG (exp, 1)); + case BUILT_IN_BSWAP16: + case BUILT_IN_BSWAP32: + case BUILT_IN_BSWAP64: + case BUILT_IN_BSWAP128: + target = expand_builtin_bswap (target_mode, exp, target, subtarget); + if (target) + return target; + break; - return expand_atomic_test_and_set (target, mem, model); -} + CASE_INT_FN (BUILT_IN_FFS): + target = expand_builtin_unop (target_mode, exp, target, + subtarget, ffs_optab); + if (target) + return target; + break; + CASE_INT_FN (BUILT_IN_CLZ): + target = expand_builtin_unop (target_mode, exp, target, + subtarget, clz_optab); + if (target) + return target; + break; -/* Return true if (optional) argument ARG1 of size ARG0 is always lock free on - this architecture. If ARG1 is NULL, use typical alignment for size ARG0. */ + CASE_INT_FN (BUILT_IN_CTZ): + target = expand_builtin_unop (target_mode, exp, target, + subtarget, ctz_optab); + if (target) + return target; + break; -static tree -fold_builtin_atomic_always_lock_free (tree arg0, tree arg1) -{ - int size; - machine_mode mode; - unsigned int mode_align, type_align; + CASE_INT_FN (BUILT_IN_CLRSB): + target = expand_builtin_unop (target_mode, exp, target, + subtarget, clrsb_optab); + if (target) + return target; + break; - if (TREE_CODE (arg0) != INTEGER_CST) - return NULL_TREE; + CASE_INT_FN (BUILT_IN_POPCOUNT): + target = expand_builtin_unop (target_mode, exp, target, + subtarget, popcount_optab); + if (target) + return target; + break; - /* We need a corresponding integer mode for the access to be lock-free. */ - size = INTVAL (expand_normal (arg0)) * BITS_PER_UNIT; - if (!int_mode_for_size (size, 0).exists (&mode)) - return boolean_false_node; + CASE_INT_FN (BUILT_IN_PARITY): + target = expand_builtin_unop (target_mode, exp, target, + subtarget, parity_optab); + if (target) + return target; + break; - mode_align = GET_MODE_ALIGNMENT (mode); + case BUILT_IN_STRLEN: + target = expand_builtin_strlen (exp, target, target_mode); + if (target) + return target; + break; - if (TREE_CODE (arg1) == INTEGER_CST) - { - unsigned HOST_WIDE_INT val = UINTVAL (expand_normal (arg1)); + case BUILT_IN_STRNLEN: + target = expand_builtin_strnlen (exp, target, target_mode); + if (target) + return target; + break; - /* Either this argument is null, or it's a fake pointer encoding - the alignment of the object. */ - val = least_bit_hwi (val); - val *= BITS_PER_UNIT; + case BUILT_IN_STRCAT: + target = expand_builtin_strcat (exp); + if (target) + return target; + break; - if (val == 0 || mode_align < val) - type_align = mode_align; - else - type_align = val; - } - else - { - tree ttype = TREE_TYPE (arg1); + case BUILT_IN_GETTEXT: + case BUILT_IN_PUTS: + case BUILT_IN_PUTS_UNLOCKED: + case BUILT_IN_STRDUP: + if (validate_arglist (exp, POINTER_TYPE, VOID_TYPE)) + check_read_access (exp, CALL_EXPR_ARG (exp, 0)); + break; - /* This function is usually invoked and folded immediately by the front - end before anything else has a chance to look at it. The pointer - parameter at this point is usually cast to a void *, so check for that - and look past the cast. 
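The CASE_INT_FN groups above all funnel through expand_builtin_unop, differing only in the optab; for example (x | 1 sidesteps the undefined __builtin_clz (0)):

  int bits (unsigned x)
  {
    return __builtin_popcount (x)   /* popcount_optab */
         + __builtin_parity (x)     /* parity_optab */
         + __builtin_clz (x | 1);   /* clz_optab */
  }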
*/ - if (CONVERT_EXPR_P (arg1) - && POINTER_TYPE_P (ttype) - && VOID_TYPE_P (TREE_TYPE (ttype)) - && POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (arg1, 0)))) - arg1 = TREE_OPERAND (arg1, 0); + case BUILT_IN_INDEX: + case BUILT_IN_RINDEX: + case BUILT_IN_STRCHR: + case BUILT_IN_STRRCHR: + if (validate_arglist (exp, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) + check_read_access (exp, CALL_EXPR_ARG (exp, 0)); + break; - ttype = TREE_TYPE (arg1); - gcc_assert (POINTER_TYPE_P (ttype)); + case BUILT_IN_FPUTS: + case BUILT_IN_FPUTS_UNLOCKED: + if (validate_arglist (exp, POINTER_TYPE, POINTER_TYPE, VOID_TYPE)) + check_read_access (exp, CALL_EXPR_ARG (exp, 0)); + break; - /* Get the underlying type of the object. */ - ttype = TREE_TYPE (ttype); - type_align = TYPE_ALIGN (ttype); - } + case BUILT_IN_STRNDUP: + if (validate_arglist (exp, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) + check_read_access (exp, CALL_EXPR_ARG (exp, 0), CALL_EXPR_ARG (exp, 1)); + break; - /* If the object has smaller alignment, the lock free routines cannot - be used. */ - if (type_align < mode_align) - return boolean_false_node; + case BUILT_IN_STRCASECMP: + case BUILT_IN_STRPBRK: + case BUILT_IN_STRSPN: + case BUILT_IN_STRCSPN: + case BUILT_IN_STRSTR: + if (validate_arglist (exp, POINTER_TYPE, POINTER_TYPE, VOID_TYPE)) + { + check_read_access (exp, CALL_EXPR_ARG (exp, 0)); + check_read_access (exp, CALL_EXPR_ARG (exp, 1)); + } + break; - /* Check if a compare_and_swap pattern exists for the mode which represents - the required size. The pattern is not allowed to fail, so the existence - of the pattern indicates support is present. Also require that an - atomic load exists for the required size. */ - if (can_compare_and_swap_p (mode, true) && can_atomic_load_p (mode)) - return boolean_true_node; - else - return boolean_false_node; -} - -/* Return true if the parameters to call EXP represent an object which will - always generate lock free instructions. The first argument represents the - size of the object, and the second parameter is a pointer to the object - itself. If NULL is passed for the object, then the result is based on - typical alignment for an object of the specified size. Otherwise return - false. */ + case BUILT_IN_STRCPY: + target = expand_builtin_strcpy (exp, target); + if (target) + return target; + break; -static rtx -expand_builtin_atomic_always_lock_free (tree exp) -{ - tree size; - tree arg0 = CALL_EXPR_ARG (exp, 0); - tree arg1 = CALL_EXPR_ARG (exp, 1); + case BUILT_IN_STRNCAT: + target = expand_builtin_strncat (exp, target); + if (target) + return target; + break; - if (TREE_CODE (arg0) != INTEGER_CST) - { - error ("non-constant argument 1 to %qs", "__atomic_always_lock_free"); - return const0_rtx; - } + case BUILT_IN_STRNCPY: + target = expand_builtin_strncpy (exp, target); + if (target) + return target; + break; - size = fold_builtin_atomic_always_lock_free (arg0, arg1); - if (size == boolean_true_node) - return const1_rtx; - return const0_rtx; -} + case BUILT_IN_STPCPY: + target = expand_builtin_stpcpy (exp, target, mode); + if (target) + return target; + break; -/* Return a one or zero if it can be determined that object ARG1 of size ARG - is lock free on this architecture. */ + case BUILT_IN_STPNCPY: + target = expand_builtin_stpncpy (exp, target); + if (target) + return target; + break; -static tree -fold_builtin_atomic_is_lock_free (tree arg0, tree arg1) -{ - if (!flag_inline_atomics) - return NULL_TREE; - - /* If it isn't always lock free, don't generate a result. 
 */
- if (fold_builtin_atomic_always_lock_free (arg0, arg1) == boolean_true_node)
- return boolean_true_node;
+ case BUILT_IN_MEMCHR:
+ target = expand_builtin_memchr (exp, target);
+ if (target)
+ return target;
+ break;
- return NULL_TREE;
-}
+ case BUILT_IN_MEMCPY:
+ target = expand_builtin_memcpy (exp, target);
+ if (target)
+ return target;
+ break;
-/* Return true if the parameters to call EXP represent an object which will
- always generate lock free instructions. The first argument represents the
- size of the object, and the second parameter is a pointer to the object
- itself. If NULL is passed for the object, then the result is based on
- typical alignment for an object of the specified size. Otherwise return
- NULL. */
+ case BUILT_IN_MEMMOVE:
+ target = expand_builtin_memmove (exp, target);
+ if (target)
+ return target;
+ break;
-static rtx
-expand_builtin_atomic_is_lock_free (tree exp)
-{
- tree size;
- tree arg0 = CALL_EXPR_ARG (exp, 0);
- tree arg1 = CALL_EXPR_ARG (exp, 1);
+ case BUILT_IN_MEMPCPY:
+ target = expand_builtin_mempcpy (exp, target);
+ if (target)
+ return target;
+ break;
- if (!INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
- {
- error ("non-integer argument 1 to %qs", "__atomic_is_lock_free");
- return NULL_RTX;
- }
+ case BUILT_IN_MEMSET:
+ target = expand_builtin_memset (exp, target, mode);
+ if (target)
+ return target;
+ break;
- if (!flag_inline_atomics)
- return NULL_RTX;
+ case BUILT_IN_BZERO:
+ target = expand_builtin_bzero (exp);
+ if (target)
+ return target;
+ break;
- /* If the value is known at compile time, return the RTX for it. */
- size = fold_builtin_atomic_is_lock_free (arg0, arg1);
- if (size == boolean_true_node)
- return const1_rtx;
+ /* Expand it as BUILT_IN_MEMCMP_EQ first. If not successful, change it
+ back to a BUILT_IN_STRCMP. Remember to delete the 3rd parameter
+ when changing it to a strcmp call. */
+ case BUILT_IN_STRCMP_EQ:
+ target = expand_builtin_memcmp (exp, target, true);
+ if (target)
+ return target;
- return NULL_RTX;
-}
+ /* Change this call back to a BUILT_IN_STRCMP. */
+ TREE_OPERAND (exp, 1)
+ = build_fold_addr_expr (builtin_decl_explicit (BUILT_IN_STRCMP));
-/* Expand the __atomic_thread_fence intrinsic:
- void __atomic_thread_fence (enum memmodel)
- EXP is the CALL_EXPR. */
+ /* Delete the last parameter. */
+ unsigned int i;
+ vec<tree, va_gc> *arg_vec;
+ vec_alloc (arg_vec, 2);
+ for (i = 0; i < 2; i++)
+ arg_vec->quick_push (CALL_EXPR_ARG (exp, i));
+ exp = build_call_vec (TREE_TYPE (exp), CALL_EXPR_FN (exp), arg_vec);
+ /* FALLTHROUGH */
-static void
-expand_builtin_atomic_thread_fence (tree exp)
-{
- enum memmodel model = get_memmodel (CALL_EXPR_ARG (exp, 0));
- expand_mem_thread_fence (model);
-}
+ case BUILT_IN_STRCMP:
+ target = expand_builtin_strcmp (exp, target);
+ if (target)
+ return target;
+ break;
-/* Expand the __atomic_signal_fence intrinsic:
- void __atomic_signal_fence (enum memmodel)
- EXP is the CALL_EXPR. */
+ /* Expand it as BUILT_IN_MEMCMP_EQ first. If not successful, change it
+ back to a BUILT_IN_STRNCMP. */
+ case BUILT_IN_STRNCMP_EQ:
+ target = expand_builtin_memcmp (exp, target, true);
+ if (target)
+ return target;
-static void
-expand_builtin_atomic_signal_fence (tree exp)
-{
- enum memmodel model = get_memmodel (CALL_EXPR_ARG (exp, 0));
- expand_mem_signal_fence (model);
-}
+ /* Change it back to a BUILT_IN_STRNCMP. */
+ TREE_OPERAND (exp, 1)
+ = build_fold_addr_expr (builtin_decl_explicit (BUILT_IN_STRNCMP));
+ /* FALLTHROUGH */
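To make the *_EQ strategy above concrete: the _EQ variants mark calls whose result is only ever compared against zero. A rough source-level sketch of the fallback logic (illustrative only; "buf" and the string literal are made up, and this is not code from the patch):

    /* The caller only tests the result for equality, so the call was
       marked BUILT_IN_STRNCMP_EQ.  */
    if (strncmp (buf, "abc", 3) == 0)
      ...
    /* expand_builtin first tries the cheaper equality-only expansion,
       as if the source had been:  */
    if (memcmp (buf, "abc", 3) == 0)
      ...
    /* Only if that expansion fails is the CALL_EXPR rewritten back into
       an ordinary strncmp (or strcmp) library call, with the extra bound
       argument of the strcmp variant deleted first.  */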
-/* Expand the __sync_synchronize intrinsic. */
+ case BUILT_IN_STRNCMP:
+ target = expand_builtin_strncmp (exp, target, mode);
+ if (target)
+ return target;
+ break;
-static void
-expand_builtin_sync_synchronize (void)
-{
- expand_mem_thread_fence (MEMMODEL_SYNC_SEQ_CST);
-}
+ case BUILT_IN_BCMP:
+ case BUILT_IN_MEMCMP:
+ case BUILT_IN_MEMCMP_EQ:
+ target = expand_builtin_memcmp (exp, target, fcode == BUILT_IN_MEMCMP_EQ);
+ if (target)
+ return target;
+ if (fcode == BUILT_IN_MEMCMP_EQ)
+ {
+ tree newdecl = builtin_decl_explicit (BUILT_IN_MEMCMP);
+ TREE_OPERAND (exp, 1) = build_fold_addr_expr (newdecl);
+ }
+ break;
-static rtx
-expand_builtin_thread_pointer (tree exp, rtx target)
-{
- enum insn_code icode;
- if (!validate_arglist (exp, VOID_TYPE))
- return const0_rtx;
- icode = direct_optab_handler (get_thread_pointer_optab, Pmode);
- if (icode != CODE_FOR_nothing)
- {
- class expand_operand op;
- /* If the target is not suitable then create a new target. */
- if (target == NULL_RTX
- || !REG_P (target)
- || GET_MODE (target) != Pmode)
- target = gen_reg_rtx (Pmode);
- create_output_operand (&op, target, Pmode);
- expand_insn (icode, 1, &op);
- return target;
- }
- error ("%<__builtin_thread_pointer%> is not supported on this target");
- return const0_rtx;
-}
+ case BUILT_IN_SETJMP:
+ /* This should have been lowered to the builtins below. */
+ gcc_unreachable ();
-static void
-expand_builtin_set_thread_pointer (tree exp)
-{
- enum insn_code icode;
- if (!validate_arglist (exp, POINTER_TYPE, VOID_TYPE))
- return;
- icode = direct_optab_handler (set_thread_pointer_optab, Pmode);
- if (icode != CODE_FOR_nothing)
- {
- class expand_operand op;
- rtx val = expand_expr (CALL_EXPR_ARG (exp, 0), NULL_RTX,
- Pmode, EXPAND_NORMAL);
- create_input_operand (&op, val, Pmode);
- expand_insn (icode, 1, &op);
- return;
- }
- error ("%<__builtin_set_thread_pointer%> is not supported on this target");
-}
+ case BUILT_IN_SETJMP_SETUP:
+ /* __builtin_setjmp_setup is passed a pointer to an array of five words
+ and the receiver label. */
+ if (validate_arglist (exp, POINTER_TYPE, POINTER_TYPE, VOID_TYPE))
+ {
+ rtx buf_addr = expand_expr (CALL_EXPR_ARG (exp, 0), subtarget,
+ VOIDmode, EXPAND_NORMAL);
+ tree label = TREE_OPERAND (CALL_EXPR_ARG (exp, 1), 0);
+ rtx_insn *label_r = label_rtx (label);
-
-/* Emit code to restore the current value of stack. */
+ /* This is copied from the handling of non-local gotos. */
+ expand_builtin_setjmp_setup (buf_addr, label_r);
+ nonlocal_goto_handler_labels
+ = gen_rtx_INSN_LIST (VOIDmode, label_r,
+ nonlocal_goto_handler_labels);
+ /* ??? Do not let expand_label treat us as such since we would
+ not want to be both on the list of non-local labels and on
+ the list of forced labels. */
+ FORCED_LABEL (label) = 0;
+ return const0_rtx;
+ }
+ break;
-static void
-expand_stack_restore (tree var)
-{
- rtx_insn *prev;
- rtx sa = expand_normal (var);
-
- sa = convert_memory_address (Pmode, sa);
-
- prev = get_last_insn ();
- emit_stack_restore (SAVE_BLOCK, sa);
+ case BUILT_IN_SETJMP_RECEIVER:
+ /* __builtin_setjmp_receiver is passed the receiver label. */
+ if (validate_arglist (exp, POINTER_TYPE, VOID_TYPE))
+ {
+ tree label = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
+ rtx_insn *label_r = label_rtx (label);
- record_new_stack_level ();
+ expand_builtin_setjmp_receiver (label_r);
+ return const0_rtx;
+ }
+ break;
- fixup_args_size_notes (prev, get_last_insn (), 0);
-}
+ /* __builtin_longjmp is passed a pointer to an array of five words.
+ It's similar to the C library longjmp function but works with + __builtin_setjmp above. */ + case BUILT_IN_LONGJMP: + if (validate_arglist (exp, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) + { + rtx buf_addr = expand_expr (CALL_EXPR_ARG (exp, 0), subtarget, + VOIDmode, EXPAND_NORMAL); + rtx value = expand_normal (CALL_EXPR_ARG (exp, 1)); -/* Emit code to save the current value of stack. */ + if (value != const1_rtx) + { + error ("%<__builtin_longjmp%> second argument must be 1"); + return const0_rtx; + } -static rtx -expand_stack_save (void) -{ - rtx ret = NULL_RTX; + expand_builtin_longjmp (buf_addr, value); + return const0_rtx; + } + break; - emit_stack_save (SAVE_BLOCK, &ret); - return ret; -} + case BUILT_IN_NONLOCAL_GOTO: + target = expand_builtin_nonlocal_goto (exp); + if (target) + return target; + break; -/* Emit code to get the openacc gang, worker or vector id or size. */ + /* This updates the setjmp buffer that is its argument with the value + of the current stack pointer. */ + case BUILT_IN_UPDATE_SETJMP_BUF: + if (validate_arglist (exp, POINTER_TYPE, VOID_TYPE)) + { + rtx buf_addr + = expand_normal (CALL_EXPR_ARG (exp, 0)); -static rtx -expand_builtin_goacc_parlevel_id_size (tree exp, rtx target, int ignore) -{ - const char *name; - rtx fallback_retval; - rtx_insn *(*gen_fn) (rtx, rtx); - switch (DECL_FUNCTION_CODE (get_callee_fndecl (exp))) - { - case BUILT_IN_GOACC_PARLEVEL_ID: - name = "__builtin_goacc_parlevel_id"; - fallback_retval = const0_rtx; - gen_fn = targetm.gen_oacc_dim_pos; - break; - case BUILT_IN_GOACC_PARLEVEL_SIZE: - name = "__builtin_goacc_parlevel_size"; - fallback_retval = const1_rtx; - gen_fn = targetm.gen_oacc_dim_size; + expand_builtin_update_setjmp_buf (buf_addr); + return const0_rtx; + } break; - default: - gcc_unreachable (); - } - if (oacc_get_fn_attrib (current_function_decl) == NULL_TREE) - { - error ("%qs only supported in OpenACC code", name); + case BUILT_IN_TRAP: + expand_builtin_trap (); return const0_rtx; - } - tree arg = CALL_EXPR_ARG (exp, 0); - if (TREE_CODE (arg) != INTEGER_CST) - { - error ("non-constant argument 0 to %qs", name); + case BUILT_IN_UNREACHABLE: + expand_builtin_unreachable (); return const0_rtx; - } - int dim = TREE_INT_CST_LOW (arg); - switch (dim) - { - case GOMP_DIM_GANG: - case GOMP_DIM_WORKER: - case GOMP_DIM_VECTOR: + CASE_FLT_FN (BUILT_IN_SIGNBIT): + case BUILT_IN_SIGNBITD32: + case BUILT_IN_SIGNBITD64: + case BUILT_IN_SIGNBITD128: + target = expand_builtin_signbit (exp, target); + if (target) + return target; break; - default: - error ("illegal argument 0 to %qs", name); - return const0_rtx; - } - if (ignore) - return target; + /* Various hooks for the DWARF 2 __throw routine. 
*/ + case BUILT_IN_UNWIND_INIT: + expand_builtin_unwind_init (); + return const0_rtx; + case BUILT_IN_DWARF_CFA: + return virtual_cfa_rtx; +#ifdef DWARF2_UNWIND_INFO + case BUILT_IN_DWARF_SP_COLUMN: + return expand_builtin_dwarf_sp_column (); + case BUILT_IN_INIT_DWARF_REG_SIZES: + expand_builtin_init_dwarf_reg_sizes (CALL_EXPR_ARG (exp, 0)); + return const0_rtx; +#endif + case BUILT_IN_FROB_RETURN_ADDR: + return expand_builtin_frob_return_addr (CALL_EXPR_ARG (exp, 0)); + case BUILT_IN_EXTRACT_RETURN_ADDR: + return expand_builtin_extract_return_addr (CALL_EXPR_ARG (exp, 0)); + case BUILT_IN_EH_RETURN: + expand_builtin_eh_return (CALL_EXPR_ARG (exp, 0), + CALL_EXPR_ARG (exp, 1)); + return const0_rtx; + case BUILT_IN_EH_RETURN_DATA_REGNO: + return expand_builtin_eh_return_data_regno (exp); + case BUILT_IN_EXTEND_POINTER: + return expand_builtin_extend_pointer (CALL_EXPR_ARG (exp, 0)); + case BUILT_IN_EH_POINTER: + return expand_builtin_eh_pointer (exp); + case BUILT_IN_EH_FILTER: + return expand_builtin_eh_filter (exp); + case BUILT_IN_EH_COPY_VALUES: + return expand_builtin_eh_copy_values (exp); - if (target == NULL_RTX) - target = gen_reg_rtx (TYPE_MODE (TREE_TYPE (exp))); + case BUILT_IN_VA_START: + return expand_builtin_va_start (exp); + case BUILT_IN_VA_END: + return expand_builtin_va_end (exp); + case BUILT_IN_VA_COPY: + return expand_builtin_va_copy (exp); + case BUILT_IN_EXPECT: + return expand_builtin_expect (exp, target); + case BUILT_IN_EXPECT_WITH_PROBABILITY: + return expand_builtin_expect_with_probability (exp, target); + case BUILT_IN_ASSUME_ALIGNED: + return expand_builtin_assume_aligned (exp, target); + case BUILT_IN_PREFETCH: + expand_builtin_prefetch (exp); + return const0_rtx; - if (!targetm.have_oacc_dim_size ()) - { - emit_move_insn (target, fallback_retval); - return target; - } + case BUILT_IN_INIT_TRAMPOLINE: + return expand_builtin_init_trampoline (exp, true); + case BUILT_IN_INIT_HEAP_TRAMPOLINE: + return expand_builtin_init_trampoline (exp, false); + case BUILT_IN_ADJUST_TRAMPOLINE: + return expand_builtin_adjust_trampoline (exp); - rtx reg = MEM_P (target) ? gen_reg_rtx (GET_MODE (target)) : target; - emit_insn (gen_fn (reg, GEN_INT (dim))); - if (reg != target) - emit_move_insn (target, reg); + case BUILT_IN_INIT_DESCRIPTOR: + return expand_builtin_init_descriptor (exp); + case BUILT_IN_ADJUST_DESCRIPTOR: + return expand_builtin_adjust_descriptor (exp); - return target; -} + case BUILT_IN_FORK: + case BUILT_IN_EXECL: + case BUILT_IN_EXECV: + case BUILT_IN_EXECLP: + case BUILT_IN_EXECLE: + case BUILT_IN_EXECVP: + case BUILT_IN_EXECVE: + target = expand_builtin_fork_or_exec (fndecl, exp, target, ignore); + if (target) + return target; + break; -/* Expand a string compare operation using a sequence of char comparison - to get rid of the calling overhead, with result going to TARGET if - that's convenient. + case BUILT_IN_SYNC_FETCH_AND_ADD_1: + case BUILT_IN_SYNC_FETCH_AND_ADD_2: + case BUILT_IN_SYNC_FETCH_AND_ADD_4: + case BUILT_IN_SYNC_FETCH_AND_ADD_8: + case BUILT_IN_SYNC_FETCH_AND_ADD_16: + mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_FETCH_AND_ADD_1); + target = expand_builtin_sync_operation (mode, exp, PLUS, false, target); + if (target) + return target; + break; - VAR_STR is the variable string source; - CONST_STR is the constant string source; - LENGTH is the number of chars to compare; - CONST_STR_N indicates which source string is the constant string; - IS_MEMCMP indicates whether it's a memcmp or strcmp. 
-
- to: (assume const_str_n is 2, i.e., arg2 is a constant string)
+ case BUILT_IN_SYNC_FETCH_AND_SUB_1:
+ case BUILT_IN_SYNC_FETCH_AND_SUB_2:
+ case BUILT_IN_SYNC_FETCH_AND_SUB_4:
+ case BUILT_IN_SYNC_FETCH_AND_SUB_8:
+ case BUILT_IN_SYNC_FETCH_AND_SUB_16:
+ mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_FETCH_AND_SUB_1);
+ target = expand_builtin_sync_operation (mode, exp, MINUS, false, target);
+ if (target)
+ return target;
+ break;
- target = (int) (unsigned char) var_str[0]
- - (int) (unsigned char) const_str[0];
- if (target != 0)
- goto ne_label;
- ...
- target = (int) (unsigned char) var_str[length - 2]
- - (int) (unsigned char) const_str[length - 2];
- if (target != 0)
- goto ne_label;
- target = (int) (unsigned char) var_str[length - 1]
- - (int) (unsigned char) const_str[length - 1];
- ne_label:
- */
+ case BUILT_IN_SYNC_FETCH_AND_OR_1:
+ case BUILT_IN_SYNC_FETCH_AND_OR_2:
+ case BUILT_IN_SYNC_FETCH_AND_OR_4:
+ case BUILT_IN_SYNC_FETCH_AND_OR_8:
+ case BUILT_IN_SYNC_FETCH_AND_OR_16:
+ mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_FETCH_AND_OR_1);
+ target = expand_builtin_sync_operation (mode, exp, IOR, false, target);
+ if (target)
+ return target;
+ break;
-static rtx
-inline_string_cmp (rtx target, tree var_str, const char *const_str,
- unsigned HOST_WIDE_INT length,
- int const_str_n, machine_mode mode)
-{
- HOST_WIDE_INT offset = 0;
- rtx var_rtx_array
- = get_memory_rtx (var_str, build_int_cst (unsigned_type_node, length));
- rtx var_rtx = NULL_RTX;
- rtx const_rtx = NULL_RTX;
- rtx result = target ? target : gen_reg_rtx (mode);
- rtx_code_label *ne_label = gen_label_rtx ();
- tree unit_type_node = unsigned_char_type_node;
- scalar_int_mode unit_mode
- = as_a <scalar_int_mode> (TYPE_MODE (unit_type_node));
-
- start_sequence ();
+ case BUILT_IN_SYNC_FETCH_AND_AND_1:
+ case BUILT_IN_SYNC_FETCH_AND_AND_2:
+ case BUILT_IN_SYNC_FETCH_AND_AND_4:
+ case BUILT_IN_SYNC_FETCH_AND_AND_8:
+ case BUILT_IN_SYNC_FETCH_AND_AND_16:
+ mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_FETCH_AND_AND_1);
+ target = expand_builtin_sync_operation (mode, exp, AND, false, target);
+ if (target)
+ return target;
+ break;
- for (unsigned HOST_WIDE_INT i = 0; i < length; i++)
- {
- var_rtx
- = adjust_address (var_rtx_array, TYPE_MODE (unit_type_node), offset);
- const_rtx = c_readstr (const_str + offset, unit_mode);
- rtx op0 = (const_str_n == 1) ? const_rtx : var_rtx;
- rtx op1 = (const_str_n == 1) ?
 var_rtx : const_rtx;
+ case BUILT_IN_SYNC_FETCH_AND_XOR_1:
+ case BUILT_IN_SYNC_FETCH_AND_XOR_2:
+ case BUILT_IN_SYNC_FETCH_AND_XOR_4:
+ case BUILT_IN_SYNC_FETCH_AND_XOR_8:
+ case BUILT_IN_SYNC_FETCH_AND_XOR_16:
+ mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_FETCH_AND_XOR_1);
+ target = expand_builtin_sync_operation (mode, exp, XOR, false, target);
+ if (target)
+ return target;
+ break;
- op0 = convert_modes (mode, unit_mode, op0, 1);
- op1 = convert_modes (mode, unit_mode, op1, 1);
- result = expand_simple_binop (mode, MINUS, op0, op1,
- result, 1, OPTAB_WIDEN);
- if (i < length - 1)
- emit_cmp_and_jump_insns (result, CONST0_RTX (mode), NE, NULL_RTX,
- mode, true, ne_label);
- offset += GET_MODE_SIZE (unit_mode);
- }
+ case BUILT_IN_SYNC_FETCH_AND_NAND_1:
+ case BUILT_IN_SYNC_FETCH_AND_NAND_2:
+ case BUILT_IN_SYNC_FETCH_AND_NAND_4:
+ case BUILT_IN_SYNC_FETCH_AND_NAND_8:
+ case BUILT_IN_SYNC_FETCH_AND_NAND_16:
+ mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_FETCH_AND_NAND_1);
+ target = expand_builtin_sync_operation (mode, exp, NOT, false, target);
+ if (target)
+ return target;
+ break;
- emit_label (ne_label);
- rtx_insn *insns = get_insns ();
- end_sequence ();
- emit_insn (insns);
+ case BUILT_IN_SYNC_ADD_AND_FETCH_1:
+ case BUILT_IN_SYNC_ADD_AND_FETCH_2:
+ case BUILT_IN_SYNC_ADD_AND_FETCH_4:
+ case BUILT_IN_SYNC_ADD_AND_FETCH_8:
+ case BUILT_IN_SYNC_ADD_AND_FETCH_16:
+ mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_ADD_AND_FETCH_1);
+ target = expand_builtin_sync_operation (mode, exp, PLUS, true, target);
+ if (target)
+ return target;
+ break;
- return result;
-}
+ case BUILT_IN_SYNC_SUB_AND_FETCH_1:
+ case BUILT_IN_SYNC_SUB_AND_FETCH_2:
+ case BUILT_IN_SYNC_SUB_AND_FETCH_4:
+ case BUILT_IN_SYNC_SUB_AND_FETCH_8:
+ case BUILT_IN_SYNC_SUB_AND_FETCH_16:
+ mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_SUB_AND_FETCH_1);
+ target = expand_builtin_sync_operation (mode, exp, MINUS, true, target);
+ if (target)
+ return target;
+ break;
-/* Inline expansion of a call to str(n)cmp and memcmp, with result going
- to TARGET if that's convenient.
- If the call has not been inlined, return NULL_RTX. */
+ case BUILT_IN_SYNC_OR_AND_FETCH_1:
+ case BUILT_IN_SYNC_OR_AND_FETCH_2:
+ case BUILT_IN_SYNC_OR_AND_FETCH_4:
+ case BUILT_IN_SYNC_OR_AND_FETCH_8:
+ case BUILT_IN_SYNC_OR_AND_FETCH_16:
+ mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_OR_AND_FETCH_1);
+ target = expand_builtin_sync_operation (mode, exp, IOR, true, target);
+ if (target)
+ return target;
+ break;
-static rtx
-inline_expand_builtin_bytecmp (tree exp, rtx target)
-{
- tree fndecl = get_callee_fndecl (exp);
- enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
- bool is_ncmp = (fcode == BUILT_IN_STRNCMP || fcode == BUILT_IN_MEMCMP);
+ case BUILT_IN_SYNC_AND_AND_FETCH_1:
+ case BUILT_IN_SYNC_AND_AND_FETCH_2:
+ case BUILT_IN_SYNC_AND_AND_FETCH_4:
+ case BUILT_IN_SYNC_AND_AND_FETCH_8:
+ case BUILT_IN_SYNC_AND_AND_FETCH_16:
+ mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_AND_AND_FETCH_1);
+ target = expand_builtin_sync_operation (mode, exp, AND, true, target);
+ if (target)
+ return target;
+ break;
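For reference, the expansion emitted by inline_string_cmp corresponds to source along the following lines (a hand-written sketch matching the pseudo-code in the function comment above; "s" and the literal "hi" are hypothetical):

    /* strcmp (s, "hi"), with const_str_n == 2 and length == 3.  */
    int result = (int) (unsigned char) s[0] - (int) (unsigned char) 'h';
    if (result != 0)
      goto ne_label;
    result = (int) (unsigned char) s[1] - (int) (unsigned char) 'i';
    if (result != 0)
      goto ne_label;
    result = (int) (unsigned char) s[2] - (int) (unsigned char) '\0';
    ne_label:
    /* result is now negative, zero or positive, as strcmp requires.  */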
- /* Do NOT apply this inlining expansion when optimizing for size or
- optimization level below 2. */
- if (optimize < 2 || optimize_insn_for_size_p ())
- return NULL_RTX;
+ case BUILT_IN_SYNC_XOR_AND_FETCH_1:
+ case BUILT_IN_SYNC_XOR_AND_FETCH_2:
+ case BUILT_IN_SYNC_XOR_AND_FETCH_4:
+ case BUILT_IN_SYNC_XOR_AND_FETCH_8:
+ case BUILT_IN_SYNC_XOR_AND_FETCH_16:
+ mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_XOR_AND_FETCH_1);
+ target = expand_builtin_sync_operation (mode, exp, XOR, true, target);
+ if (target)
+ return target;
+ break;
- gcc_checking_assert (fcode == BUILT_IN_STRCMP
- || fcode == BUILT_IN_STRNCMP
- || fcode == BUILT_IN_MEMCMP);
+ case BUILT_IN_SYNC_NAND_AND_FETCH_1:
+ case BUILT_IN_SYNC_NAND_AND_FETCH_2:
+ case BUILT_IN_SYNC_NAND_AND_FETCH_4:
+ case BUILT_IN_SYNC_NAND_AND_FETCH_8:
+ case BUILT_IN_SYNC_NAND_AND_FETCH_16:
+ mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_NAND_AND_FETCH_1);
+ target = expand_builtin_sync_operation (mode, exp, NOT, true, target);
+ if (target)
+ return target;
+ break;
- /* On a target where the type of the call (int) has the same or narrower
- precision than unsigned char, give up the inlining expansion. */
- if (TYPE_PRECISION (unsigned_char_type_node)
- >= TYPE_PRECISION (TREE_TYPE (exp)))
- return NULL_RTX;
+ case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_1:
+ case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_2:
+ case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_4:
+ case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_8:
+ case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_16:
+ if (mode == VOIDmode)
+ mode = TYPE_MODE (boolean_type_node);
+ if (!target || !register_operand (target, mode))
+ target = gen_reg_rtx (mode);
- tree arg1 = CALL_EXPR_ARG (exp, 0);
- tree arg2 = CALL_EXPR_ARG (exp, 1);
- tree len3_tree = is_ncmp ? CALL_EXPR_ARG (exp, 2) : NULL_TREE;
+ mode = get_builtin_sync_mode
+ (fcode - BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_1);
+ target = expand_builtin_compare_and_swap (mode, exp, true, target);
+ if (target)
+ return target;
+ break;
- unsigned HOST_WIDE_INT len1 = 0;
- unsigned HOST_WIDE_INT len2 = 0;
- unsigned HOST_WIDE_INT len3 = 0;
+ case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_1:
+ case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_2:
+ case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_4:
+ case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_8:
+ case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_16:
+ mode = get_builtin_sync_mode
+ (fcode - BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_1);
+ target = expand_builtin_compare_and_swap (mode, exp, false, target);
+ if (target)
+ return target;
+ break;
- /* Get the object representation of the initializers of ARG1 and ARG2
- as strings, provided they refer to constant objects, with their byte
- sizes in LEN1 and LEN2, respectively. */
- const char *bytes1 = getbyterep (arg1, &len1);
- const char *bytes2 = getbyterep (arg2, &len2);
+ case BUILT_IN_SYNC_LOCK_TEST_AND_SET_1:
+ case BUILT_IN_SYNC_LOCK_TEST_AND_SET_2:
+ case BUILT_IN_SYNC_LOCK_TEST_AND_SET_4:
+ case BUILT_IN_SYNC_LOCK_TEST_AND_SET_8:
+ case BUILT_IN_SYNC_LOCK_TEST_AND_SET_16:
+ mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_LOCK_TEST_AND_SET_1);
+ target = expand_builtin_sync_lock_test_and_set (mode, exp, target);
+ if (target)
+ return target;
+ break;
- /* Fail if neither argument refers to an initialized constant.
 */
- if (!bytes1 && !bytes2)
- return NULL_RTX;
+ case BUILT_IN_SYNC_LOCK_RELEASE_1:
+ case BUILT_IN_SYNC_LOCK_RELEASE_2:
+ case BUILT_IN_SYNC_LOCK_RELEASE_4:
+ case BUILT_IN_SYNC_LOCK_RELEASE_8:
+ case BUILT_IN_SYNC_LOCK_RELEASE_16:
+ mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_LOCK_RELEASE_1);
+ expand_builtin_sync_lock_release (mode, exp);
+ return const0_rtx;
- if (is_ncmp)
- {
- /* Fail if the memcmp/strncmp bound is not a constant. */
- if (!tree_fits_uhwi_p (len3_tree))
- return NULL_RTX;
+ case BUILT_IN_SYNC_SYNCHRONIZE:
+ expand_builtin_sync_synchronize ();
+ return const0_rtx;
- len3 = tree_to_uhwi (len3_tree);
+ case BUILT_IN_ATOMIC_EXCHANGE_1:
+ case BUILT_IN_ATOMIC_EXCHANGE_2:
+ case BUILT_IN_ATOMIC_EXCHANGE_4:
+ case BUILT_IN_ATOMIC_EXCHANGE_8:
+ case BUILT_IN_ATOMIC_EXCHANGE_16:
+ mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_EXCHANGE_1);
+ target = expand_builtin_atomic_exchange (mode, exp, target);
+ if (target)
+ return target;
+ break;
- if (fcode == BUILT_IN_MEMCMP)
- {
- /* Fail if the memcmp bound is greater than the size of either
- of the two constant objects. */
- if ((bytes1 && len1 < len3)
- || (bytes2 && len2 < len3))
- return NULL_RTX;
- }
- }
+ case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_1:
+ case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_2:
+ case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_4:
+ case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_8:
+ case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_16:
+ {
+ unsigned int nargs, z;
+ vec<tree, va_gc> *vec;
- if (fcode != BUILT_IN_MEMCMP)
- {
- /* For string functions (i.e., strcmp and strncmp) reduce LEN1
- and LEN2 to the length of the nul-terminated string stored
- in each. */
- if (bytes1 != NULL)
- len1 = strnlen (bytes1, len1) + 1;
- if (bytes2 != NULL)
- len2 = strnlen (bytes2, len2) + 1;
- }
+ mode =
+ get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_COMPARE_EXCHANGE_1);
+ target = expand_builtin_atomic_compare_exchange (mode, exp, target);
+ if (target)
+ return target;
- /* See inline_string_cmp. */
- int const_str_n;
- if (!len1)
- const_str_n = 2;
- else if (!len2)
- const_str_n = 1;
- else if (len2 > len1)
- const_str_n = 1;
- else
- const_str_n = 2;
-
- /* For strncmp only, compute the new bound as the smallest of
- the lengths of the two strings (plus 1) and the bound provided
- to the function. */
- unsigned HOST_WIDE_INT bound = (const_str_n == 1) ? len1 : len2;
- if (is_ncmp && len3 < bound)
- bound = len3;
-
- /* If the bound of the comparison is larger than the threshold,
- do nothing. */
- if (bound > (unsigned HOST_WIDE_INT) param_builtin_string_cmp_inline_length)
- return NULL_RTX;
-
- machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
-
- /* Now, start inline expansion of the call. */
- return inline_string_cmp (target, (const_str_n == 1) ? arg2 : arg1,
- (const_str_n == 1) ? bytes1 : bytes2, bound,
- const_str_n, mode);
-}
-
-/* Expand a call to __builtin_speculation_safe_value_<N>. MODE
- represents the size of the first argument to that call, or VOIDmode
- if the argument is a pointer. IGNORE will be true if the result
- isn't used. */
-static rtx
-expand_speculation_safe_value (machine_mode mode, tree exp, rtx target,
- bool ignore)
-{
- rtx val, failsafe;
- unsigned nargs = call_expr_nargs (exp);
-
- tree arg0 = CALL_EXPR_ARG (exp, 0);
-
- if (mode == VOIDmode)
- {
- mode = TYPE_MODE (TREE_TYPE (arg0));
- gcc_assert (GET_MODE_CLASS (mode) == MODE_INT);
- }
-
- val = expand_expr (arg0, NULL_RTX, mode, EXPAND_NORMAL);
-
- /* An optional second argument can be used as a failsafe value on
- some machines.
If it isn't present, then the failsafe value is - assumed to be 0. */ - if (nargs > 1) - { - tree arg1 = CALL_EXPR_ARG (exp, 1); - failsafe = expand_expr (arg1, NULL_RTX, mode, EXPAND_NORMAL); - } - else - failsafe = const0_rtx; - - /* If the result isn't used, the behavior is undefined. It would be - nice to emit a warning here, but path splitting means this might - happen with legitimate code. So simply drop the builtin - expansion in that case; we've handled any side-effects above. */ - if (ignore) - return const0_rtx; - - /* If we don't have a suitable target, create one to hold the result. */ - if (target == NULL || GET_MODE (target) != mode) - target = gen_reg_rtx (mode); - - if (GET_MODE (val) != mode && GET_MODE (val) != VOIDmode) - val = convert_modes (mode, VOIDmode, val, false); - - return targetm.speculation_safe_value (mode, target, val, failsafe); -} - -/* Expand an expression EXP that calls a built-in function, - with result going to TARGET if that's convenient - (and in mode MODE if that's convenient). - SUBTARGET may be used as the target for computing one of EXP's operands. - IGNORE is nonzero if the value is to be ignored. */ - -rtx -expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode, - int ignore) -{ - tree fndecl = get_callee_fndecl (exp); - machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp)); - int flags; - - if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD) - return targetm.expand_builtin (exp, target, subtarget, mode, ignore); - - /* When ASan is enabled, we don't want to expand some memory/string - builtins and rely on libsanitizer's hooks. This allows us to avoid - redundant checks and be sure, that possible overflow will be detected - by ASan. */ - - enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl); - if ((flag_sanitize & SANITIZE_ADDRESS) && asan_intercepted_p (fcode)) - return expand_call (exp, target, ignore); - - /* When not optimizing, generate calls to library functions for a certain - set of builtins. */ - if (!optimize - && !called_as_built_in (fndecl) - && fcode != BUILT_IN_FORK - && fcode != BUILT_IN_EXECL - && fcode != BUILT_IN_EXECV - && fcode != BUILT_IN_EXECLP - && fcode != BUILT_IN_EXECLE - && fcode != BUILT_IN_EXECVP - && fcode != BUILT_IN_EXECVE - && fcode != BUILT_IN_CLEAR_CACHE - && !ALLOCA_FUNCTION_CODE_P (fcode) - && fcode != BUILT_IN_FREE) - return expand_call (exp, target, ignore); - - /* The built-in function expanders test for target == const0_rtx - to determine whether the function's result will be ignored. */ - if (ignore) - target = const0_rtx; - - /* If the result of a pure or const built-in function is ignored, and - none of its arguments are volatile, we can avoid expanding the - built-in call and just evaluate the arguments for side-effects. */ - if (target == const0_rtx - && ((flags = flags_from_decl_or_type (fndecl)) & (ECF_CONST | ECF_PURE)) - && !(flags & ECF_LOOPING_CONST_OR_PURE)) - { - bool volatilep = false; - tree arg; - call_expr_arg_iterator iter; - - FOR_EACH_CALL_EXPR_ARG (arg, iter, exp) - if (TREE_THIS_VOLATILE (arg)) - { - volatilep = true; - break; - } - - if (! 
volatilep) - { - FOR_EACH_CALL_EXPR_ARG (arg, iter, exp) - expand_expr (arg, const0_rtx, VOIDmode, EXPAND_NORMAL); - return const0_rtx; - } - } - - switch (fcode) - { - CASE_FLT_FN (BUILT_IN_FABS): - CASE_FLT_FN_FLOATN_NX (BUILT_IN_FABS): - case BUILT_IN_FABSD32: - case BUILT_IN_FABSD64: - case BUILT_IN_FABSD128: - target = expand_builtin_fabs (exp, target, subtarget); - if (target) - return target; - break; - - CASE_FLT_FN (BUILT_IN_COPYSIGN): - CASE_FLT_FN_FLOATN_NX (BUILT_IN_COPYSIGN): - target = expand_builtin_copysign (exp, target, subtarget); - if (target) - return target; - break; - - /* Just do a normal library call if we were unable to fold - the values. */ - CASE_FLT_FN (BUILT_IN_CABS): - break; - - CASE_FLT_FN (BUILT_IN_FMA): - CASE_FLT_FN_FLOATN_NX (BUILT_IN_FMA): - target = expand_builtin_mathfn_ternary (exp, target, subtarget); - if (target) - return target; - break; - - CASE_FLT_FN (BUILT_IN_ILOGB): - if (! flag_unsafe_math_optimizations) + /* If this is turned into an external library call, the weak parameter + must be dropped to match the expected parameter list. */ + nargs = call_expr_nargs (exp); + vec_alloc (vec, nargs - 1); + for (z = 0; z < 3; z++) + vec->quick_push (CALL_EXPR_ARG (exp, z)); + /* Skip the boolean weak parameter. */ + for (z = 4; z < 6; z++) + vec->quick_push (CALL_EXPR_ARG (exp, z)); + exp = build_call_vec (TREE_TYPE (exp), CALL_EXPR_FN (exp), vec); break; - gcc_fallthrough (); - CASE_FLT_FN (BUILT_IN_ISINF): - CASE_FLT_FN (BUILT_IN_FINITE): - case BUILT_IN_ISFINITE: - case BUILT_IN_ISNORMAL: - target = expand_builtin_interclass_mathfn (exp, target); - if (target) - return target; - break; - - CASE_FLT_FN (BUILT_IN_ICEIL): - CASE_FLT_FN (BUILT_IN_LCEIL): - CASE_FLT_FN (BUILT_IN_LLCEIL): - CASE_FLT_FN (BUILT_IN_LFLOOR): - CASE_FLT_FN (BUILT_IN_IFLOOR): - CASE_FLT_FN (BUILT_IN_LLFLOOR): - target = expand_builtin_int_roundingfn (exp, target); - if (target) - return target; - break; - - CASE_FLT_FN (BUILT_IN_IRINT): - CASE_FLT_FN (BUILT_IN_LRINT): - CASE_FLT_FN (BUILT_IN_LLRINT): - CASE_FLT_FN (BUILT_IN_IROUND): - CASE_FLT_FN (BUILT_IN_LROUND): - CASE_FLT_FN (BUILT_IN_LLROUND): - target = expand_builtin_int_roundingfn_2 (exp, target); - if (target) - return target; - break; + } - CASE_FLT_FN (BUILT_IN_POWI): - target = expand_builtin_powi (exp, target); + case BUILT_IN_ATOMIC_LOAD_1: + case BUILT_IN_ATOMIC_LOAD_2: + case BUILT_IN_ATOMIC_LOAD_4: + case BUILT_IN_ATOMIC_LOAD_8: + case BUILT_IN_ATOMIC_LOAD_16: + mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_LOAD_1); + target = expand_builtin_atomic_load (mode, exp, target); if (target) return target; break; - CASE_FLT_FN (BUILT_IN_CEXPI): - target = expand_builtin_cexpi (exp, target); - gcc_assert (target); - return target; - - CASE_FLT_FN (BUILT_IN_SIN): - CASE_FLT_FN (BUILT_IN_COS): - if (! flag_unsafe_math_optimizations) - break; - target = expand_builtin_mathfn_3 (exp, target, subtarget); + case BUILT_IN_ATOMIC_STORE_1: + case BUILT_IN_ATOMIC_STORE_2: + case BUILT_IN_ATOMIC_STORE_4: + case BUILT_IN_ATOMIC_STORE_8: + case BUILT_IN_ATOMIC_STORE_16: + mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_STORE_1); + target = expand_builtin_atomic_store (mode, exp); if (target) - return target; + return const0_rtx; break; - CASE_FLT_FN (BUILT_IN_SINCOS): - if (! 
flag_unsafe_math_optimizations) - break; - target = expand_builtin_sincos (exp); - if (target) - return target; - break; - - case BUILT_IN_APPLY_ARGS: - return expand_builtin_apply_args (); - - /* __builtin_apply (FUNCTION, ARGUMENTS, ARGSIZE) invokes - FUNCTION with a copy of the parameters described by - ARGUMENTS, and ARGSIZE. It returns a block of memory - allocated on the stack into which is stored all the registers - that might possibly be used for returning the result of a - function. ARGUMENTS is the value returned by - __builtin_apply_args. ARGSIZE is the number of bytes of - arguments that must be copied. ??? How should this value be - computed? We'll also need a safe worst case value for varargs - functions. */ - case BUILT_IN_APPLY: - if (!validate_arglist (exp, POINTER_TYPE, - POINTER_TYPE, INTEGER_TYPE, VOID_TYPE) - && !validate_arglist (exp, REFERENCE_TYPE, - POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) - return const0_rtx; - else - { - rtx ops[3]; - - ops[0] = expand_normal (CALL_EXPR_ARG (exp, 0)); - ops[1] = expand_normal (CALL_EXPR_ARG (exp, 1)); - ops[2] = expand_normal (CALL_EXPR_ARG (exp, 2)); - - return expand_builtin_apply (ops[0], ops[1], ops[2]); - } - - /* __builtin_return (RESULT) causes the function to return the - value described by RESULT. RESULT is address of the block of - memory returned by __builtin_apply. */ - case BUILT_IN_RETURN: - if (validate_arglist (exp, POINTER_TYPE, VOID_TYPE)) - expand_builtin_return (expand_normal (CALL_EXPR_ARG (exp, 0))); - return const0_rtx; - - case BUILT_IN_SAVEREGS: - return expand_builtin_saveregs (); - - case BUILT_IN_VA_ARG_PACK: - /* All valid uses of __builtin_va_arg_pack () are removed during - inlining. */ - error ("invalid use of %<__builtin_va_arg_pack ()%>"); - return const0_rtx; - - case BUILT_IN_VA_ARG_PACK_LEN: - /* All valid uses of __builtin_va_arg_pack_len () are removed during - inlining. */ - error ("invalid use of %<__builtin_va_arg_pack_len ()%>"); - return const0_rtx; - - /* Return the address of the first anonymous stack arg. */ - case BUILT_IN_NEXT_ARG: - if (fold_builtin_next_arg (exp, false)) - return const0_rtx; - return expand_builtin_next_arg (); - - case BUILT_IN_CLEAR_CACHE: - expand_builtin___clear_cache (exp); - return const0_rtx; - - case BUILT_IN_CLASSIFY_TYPE: - return expand_builtin_classify_type (exp); - - case BUILT_IN_CONSTANT_P: - return const0_rtx; - - case BUILT_IN_FRAME_ADDRESS: - case BUILT_IN_RETURN_ADDRESS: - return expand_builtin_frame_address (fndecl, exp); - - /* Returns the address of the area where the structure is returned. - 0 otherwise. */ - case BUILT_IN_AGGREGATE_INCOMING_ADDRESS: - if (call_expr_nargs (exp) != 0 - || ! 
AGGREGATE_TYPE_P (TREE_TYPE (TREE_TYPE (current_function_decl))) - || !MEM_P (DECL_RTL (DECL_RESULT (current_function_decl)))) - return const0_rtx; - else - return XEXP (DECL_RTL (DECL_RESULT (current_function_decl)), 0); - - CASE_BUILT_IN_ALLOCA: - target = expand_builtin_alloca (exp); - if (target) - return target; - break; - - case BUILT_IN_ASAN_ALLOCAS_UNPOISON: - return expand_asan_emit_allocas_unpoison (exp); - - case BUILT_IN_STACK_SAVE: - return expand_stack_save (); - - case BUILT_IN_STACK_RESTORE: - expand_stack_restore (CALL_EXPR_ARG (exp, 0)); - return const0_rtx; - - case BUILT_IN_BSWAP16: - case BUILT_IN_BSWAP32: - case BUILT_IN_BSWAP64: - case BUILT_IN_BSWAP128: - target = expand_builtin_bswap (target_mode, exp, target, subtarget); - if (target) - return target; - break; - - CASE_INT_FN (BUILT_IN_FFS): - target = expand_builtin_unop (target_mode, exp, target, - subtarget, ffs_optab); - if (target) - return target; - break; - - CASE_INT_FN (BUILT_IN_CLZ): - target = expand_builtin_unop (target_mode, exp, target, - subtarget, clz_optab); - if (target) - return target; - break; - - CASE_INT_FN (BUILT_IN_CTZ): - target = expand_builtin_unop (target_mode, exp, target, - subtarget, ctz_optab); - if (target) - return target; - break; - - CASE_INT_FN (BUILT_IN_CLRSB): - target = expand_builtin_unop (target_mode, exp, target, - subtarget, clrsb_optab); - if (target) - return target; - break; - - CASE_INT_FN (BUILT_IN_POPCOUNT): - target = expand_builtin_unop (target_mode, exp, target, - subtarget, popcount_optab); - if (target) - return target; - break; - - CASE_INT_FN (BUILT_IN_PARITY): - target = expand_builtin_unop (target_mode, exp, target, - subtarget, parity_optab); - if (target) - return target; - break; - - case BUILT_IN_STRLEN: - target = expand_builtin_strlen (exp, target, target_mode); - if (target) - return target; - break; - - case BUILT_IN_STRNLEN: - target = expand_builtin_strnlen (exp, target, target_mode); - if (target) - return target; - break; - - case BUILT_IN_STRCAT: - target = expand_builtin_strcat (exp); - if (target) - return target; - break; - - case BUILT_IN_GETTEXT: - case BUILT_IN_PUTS: - case BUILT_IN_PUTS_UNLOCKED: - case BUILT_IN_STRDUP: - if (validate_arglist (exp, POINTER_TYPE, VOID_TYPE)) - check_read_access (exp, CALL_EXPR_ARG (exp, 0)); - break; - - case BUILT_IN_INDEX: - case BUILT_IN_RINDEX: - case BUILT_IN_STRCHR: - case BUILT_IN_STRRCHR: - if (validate_arglist (exp, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) - check_read_access (exp, CALL_EXPR_ARG (exp, 0)); - break; - - case BUILT_IN_FPUTS: - case BUILT_IN_FPUTS_UNLOCKED: - if (validate_arglist (exp, POINTER_TYPE, POINTER_TYPE, VOID_TYPE)) - check_read_access (exp, CALL_EXPR_ARG (exp, 0)); - break; - - case BUILT_IN_STRNDUP: - if (validate_arglist (exp, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) - check_read_access (exp, CALL_EXPR_ARG (exp, 0), CALL_EXPR_ARG (exp, 1)); - break; - - case BUILT_IN_STRCASECMP: - case BUILT_IN_STRPBRK: - case BUILT_IN_STRSPN: - case BUILT_IN_STRCSPN: - case BUILT_IN_STRSTR: - if (validate_arglist (exp, POINTER_TYPE, POINTER_TYPE, VOID_TYPE)) - { - check_read_access (exp, CALL_EXPR_ARG (exp, 0)); - check_read_access (exp, CALL_EXPR_ARG (exp, 1)); - } - break; - - case BUILT_IN_STRCPY: - target = expand_builtin_strcpy (exp, target); - if (target) - return target; - break; - - case BUILT_IN_STRNCAT: - target = expand_builtin_strncat (exp, target); - if (target) - return target; - break; - - case BUILT_IN_STRNCPY: - target = expand_builtin_strncpy (exp, target); - if 
(target)
- return target;
- break;
-
- case BUILT_IN_STPCPY:
- target = expand_builtin_stpcpy (exp, target, mode);
- if (target)
- return target;
- break;
-
- case BUILT_IN_STPNCPY:
- target = expand_builtin_stpncpy (exp, target);
- if (target)
- return target;
- break;
-
- case BUILT_IN_MEMCHR:
- target = expand_builtin_memchr (exp, target);
- if (target)
- return target;
- break;
-
- case BUILT_IN_MEMCPY:
- target = expand_builtin_memcpy (exp, target);
- if (target)
- return target;
- break;
-
- case BUILT_IN_MEMMOVE:
- target = expand_builtin_memmove (exp, target);
- if (target)
- return target;
- break;
-
- case BUILT_IN_MEMPCPY:
- target = expand_builtin_mempcpy (exp, target);
- if (target)
- return target;
- break;
-
- case BUILT_IN_MEMSET:
- target = expand_builtin_memset (exp, target, mode);
- if (target)
- return target;
- break;
-
- case BUILT_IN_BZERO:
- target = expand_builtin_bzero (exp);
- if (target)
- return target;
- break;
-
- /* Expand it as BUILT_IN_MEMCMP_EQ first. If not successful, change it
- back to a BUILT_IN_STRCMP. Remember to delete the 3rd parameter
- when changing it to a strcmp call. */
- case BUILT_IN_STRCMP_EQ:
- target = expand_builtin_memcmp (exp, target, true);
- if (target)
- return target;
-
- /* Change this call back to a BUILT_IN_STRCMP. */
- TREE_OPERAND (exp, 1)
- = build_fold_addr_expr (builtin_decl_explicit (BUILT_IN_STRCMP));
-
- /* Delete the last parameter. */
- unsigned int i;
- vec<tree, va_gc> *arg_vec;
- vec_alloc (arg_vec, 2);
- for (i = 0; i < 2; i++)
- arg_vec->quick_push (CALL_EXPR_ARG (exp, i));
- exp = build_call_vec (TREE_TYPE (exp), CALL_EXPR_FN (exp), arg_vec);
- /* FALLTHROUGH */
-
- case BUILT_IN_STRCMP:
- target = expand_builtin_strcmp (exp, target);
- if (target)
- return target;
- break;
-
- /* Expand it as BUILT_IN_MEMCMP_EQ first. If not successful, change it
- back to a BUILT_IN_STRNCMP. */
- case BUILT_IN_STRNCMP_EQ:
- target = expand_builtin_memcmp (exp, target, true);
- if (target)
- return target;
-
- /* Change it back to a BUILT_IN_STRNCMP. */
- TREE_OPERAND (exp, 1)
- = build_fold_addr_expr (builtin_decl_explicit (BUILT_IN_STRNCMP));
- /* FALLTHROUGH */
-
- case BUILT_IN_STRNCMP:
- target = expand_builtin_strncmp (exp, target, mode);
- if (target)
- return target;
- break;
-
- case BUILT_IN_BCMP:
- case BUILT_IN_MEMCMP:
- case BUILT_IN_MEMCMP_EQ:
- target = expand_builtin_memcmp (exp, target, fcode == BUILT_IN_MEMCMP_EQ);
- if (target)
- return target;
- if (fcode == BUILT_IN_MEMCMP_EQ)
- {
- tree newdecl = builtin_decl_explicit (BUILT_IN_MEMCMP);
- TREE_OPERAND (exp, 1) = build_fold_addr_expr (newdecl);
- }
- break;
-
- case BUILT_IN_SETJMP:
- /* This should have been lowered to the builtins below. */
- gcc_unreachable ();
-
- case BUILT_IN_SETJMP_SETUP:
- /* __builtin_setjmp_setup is passed a pointer to an array of five words
- and the receiver label. */
- if (validate_arglist (exp, POINTER_TYPE, POINTER_TYPE, VOID_TYPE))
- {
- rtx buf_addr = expand_expr (CALL_EXPR_ARG (exp, 0), subtarget,
- VOIDmode, EXPAND_NORMAL);
- tree label = TREE_OPERAND (CALL_EXPR_ARG (exp, 1), 0);
- rtx_insn *label_r = label_rtx (label);
-
- /* This is copied from the handling of non-local gotos. */
- expand_builtin_setjmp_setup (buf_addr, label_r);
- nonlocal_goto_handler_labels
- = gen_rtx_INSN_LIST (VOIDmode, label_r,
- nonlocal_goto_handler_labels);
- /* ??? Do not let expand_label treat us as such since we would
- not want to be both on the list of non-local labels and on
- the list of forced labels.
*/ - FORCED_LABEL (label) = 0; - return const0_rtx; - } - break; - - case BUILT_IN_SETJMP_RECEIVER: - /* __builtin_setjmp_receiver is passed the receiver label. */ - if (validate_arglist (exp, POINTER_TYPE, VOID_TYPE)) - { - tree label = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0); - rtx_insn *label_r = label_rtx (label); - - expand_builtin_setjmp_receiver (label_r); - return const0_rtx; - } - break; - - /* __builtin_longjmp is passed a pointer to an array of five words. - It's similar to the C library longjmp function but works with - __builtin_setjmp above. */ - case BUILT_IN_LONGJMP: - if (validate_arglist (exp, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) - { - rtx buf_addr = expand_expr (CALL_EXPR_ARG (exp, 0), subtarget, - VOIDmode, EXPAND_NORMAL); - rtx value = expand_normal (CALL_EXPR_ARG (exp, 1)); - - if (value != const1_rtx) - { - error ("%<__builtin_longjmp%> second argument must be 1"); - return const0_rtx; - } - - expand_builtin_longjmp (buf_addr, value); - return const0_rtx; - } - break; - - case BUILT_IN_NONLOCAL_GOTO: - target = expand_builtin_nonlocal_goto (exp); - if (target) - return target; - break; - - /* This updates the setjmp buffer that is its argument with the value - of the current stack pointer. */ - case BUILT_IN_UPDATE_SETJMP_BUF: - if (validate_arglist (exp, POINTER_TYPE, VOID_TYPE)) - { - rtx buf_addr - = expand_normal (CALL_EXPR_ARG (exp, 0)); - - expand_builtin_update_setjmp_buf (buf_addr); - return const0_rtx; - } - break; - - case BUILT_IN_TRAP: - expand_builtin_trap (); - return const0_rtx; - - case BUILT_IN_UNREACHABLE: - expand_builtin_unreachable (); - return const0_rtx; - - CASE_FLT_FN (BUILT_IN_SIGNBIT): - case BUILT_IN_SIGNBITD32: - case BUILT_IN_SIGNBITD64: - case BUILT_IN_SIGNBITD128: - target = expand_builtin_signbit (exp, target); - if (target) - return target; - break; - - /* Various hooks for the DWARF 2 __throw routine. 
*/ - case BUILT_IN_UNWIND_INIT: - expand_builtin_unwind_init (); - return const0_rtx; - case BUILT_IN_DWARF_CFA: - return virtual_cfa_rtx; -#ifdef DWARF2_UNWIND_INFO - case BUILT_IN_DWARF_SP_COLUMN: - return expand_builtin_dwarf_sp_column (); - case BUILT_IN_INIT_DWARF_REG_SIZES: - expand_builtin_init_dwarf_reg_sizes (CALL_EXPR_ARG (exp, 0)); - return const0_rtx; -#endif - case BUILT_IN_FROB_RETURN_ADDR: - return expand_builtin_frob_return_addr (CALL_EXPR_ARG (exp, 0)); - case BUILT_IN_EXTRACT_RETURN_ADDR: - return expand_builtin_extract_return_addr (CALL_EXPR_ARG (exp, 0)); - case BUILT_IN_EH_RETURN: - expand_builtin_eh_return (CALL_EXPR_ARG (exp, 0), - CALL_EXPR_ARG (exp, 1)); - return const0_rtx; - case BUILT_IN_EH_RETURN_DATA_REGNO: - return expand_builtin_eh_return_data_regno (exp); - case BUILT_IN_EXTEND_POINTER: - return expand_builtin_extend_pointer (CALL_EXPR_ARG (exp, 0)); - case BUILT_IN_EH_POINTER: - return expand_builtin_eh_pointer (exp); - case BUILT_IN_EH_FILTER: - return expand_builtin_eh_filter (exp); - case BUILT_IN_EH_COPY_VALUES: - return expand_builtin_eh_copy_values (exp); - - case BUILT_IN_VA_START: - return expand_builtin_va_start (exp); - case BUILT_IN_VA_END: - return expand_builtin_va_end (exp); - case BUILT_IN_VA_COPY: - return expand_builtin_va_copy (exp); - case BUILT_IN_EXPECT: - return expand_builtin_expect (exp, target); - case BUILT_IN_EXPECT_WITH_PROBABILITY: - return expand_builtin_expect_with_probability (exp, target); - case BUILT_IN_ASSUME_ALIGNED: - return expand_builtin_assume_aligned (exp, target); - case BUILT_IN_PREFETCH: - expand_builtin_prefetch (exp); - return const0_rtx; - - case BUILT_IN_INIT_TRAMPOLINE: - return expand_builtin_init_trampoline (exp, true); - case BUILT_IN_INIT_HEAP_TRAMPOLINE: - return expand_builtin_init_trampoline (exp, false); - case BUILT_IN_ADJUST_TRAMPOLINE: - return expand_builtin_adjust_trampoline (exp); - - case BUILT_IN_INIT_DESCRIPTOR: - return expand_builtin_init_descriptor (exp); - case BUILT_IN_ADJUST_DESCRIPTOR: - return expand_builtin_adjust_descriptor (exp); - - case BUILT_IN_FORK: - case BUILT_IN_EXECL: - case BUILT_IN_EXECV: - case BUILT_IN_EXECLP: - case BUILT_IN_EXECLE: - case BUILT_IN_EXECVP: - case BUILT_IN_EXECVE: - target = expand_builtin_fork_or_exec (fndecl, exp, target, ignore); - if (target) - return target; - break; - - case BUILT_IN_SYNC_FETCH_AND_ADD_1: - case BUILT_IN_SYNC_FETCH_AND_ADD_2: - case BUILT_IN_SYNC_FETCH_AND_ADD_4: - case BUILT_IN_SYNC_FETCH_AND_ADD_8: - case BUILT_IN_SYNC_FETCH_AND_ADD_16: - mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_FETCH_AND_ADD_1); - target = expand_builtin_sync_operation (mode, exp, PLUS, false, target); - if (target) - return target; - break; - - case BUILT_IN_SYNC_FETCH_AND_SUB_1: - case BUILT_IN_SYNC_FETCH_AND_SUB_2: - case BUILT_IN_SYNC_FETCH_AND_SUB_4: - case BUILT_IN_SYNC_FETCH_AND_SUB_8: - case BUILT_IN_SYNC_FETCH_AND_SUB_16: - mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_FETCH_AND_SUB_1); - target = expand_builtin_sync_operation (mode, exp, MINUS, false, target); - if (target) - return target; - break; - - case BUILT_IN_SYNC_FETCH_AND_OR_1: - case BUILT_IN_SYNC_FETCH_AND_OR_2: - case BUILT_IN_SYNC_FETCH_AND_OR_4: - case BUILT_IN_SYNC_FETCH_AND_OR_8: - case BUILT_IN_SYNC_FETCH_AND_OR_16: - mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_FETCH_AND_OR_1); - target = expand_builtin_sync_operation (mode, exp, IOR, false, target); - if (target) - return target; - break; - - case BUILT_IN_SYNC_FETCH_AND_AND_1: - case 
BUILT_IN_SYNC_FETCH_AND_AND_2: - case BUILT_IN_SYNC_FETCH_AND_AND_4: - case BUILT_IN_SYNC_FETCH_AND_AND_8: - case BUILT_IN_SYNC_FETCH_AND_AND_16: - mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_FETCH_AND_AND_1); - target = expand_builtin_sync_operation (mode, exp, AND, false, target); - if (target) - return target; - break; - - case BUILT_IN_SYNC_FETCH_AND_XOR_1: - case BUILT_IN_SYNC_FETCH_AND_XOR_2: - case BUILT_IN_SYNC_FETCH_AND_XOR_4: - case BUILT_IN_SYNC_FETCH_AND_XOR_8: - case BUILT_IN_SYNC_FETCH_AND_XOR_16: - mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_FETCH_AND_XOR_1); - target = expand_builtin_sync_operation (mode, exp, XOR, false, target); - if (target) - return target; - break; - - case BUILT_IN_SYNC_FETCH_AND_NAND_1: - case BUILT_IN_SYNC_FETCH_AND_NAND_2: - case BUILT_IN_SYNC_FETCH_AND_NAND_4: - case BUILT_IN_SYNC_FETCH_AND_NAND_8: - case BUILT_IN_SYNC_FETCH_AND_NAND_16: - mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_FETCH_AND_NAND_1); - target = expand_builtin_sync_operation (mode, exp, NOT, false, target); - if (target) - return target; - break; - - case BUILT_IN_SYNC_ADD_AND_FETCH_1: - case BUILT_IN_SYNC_ADD_AND_FETCH_2: - case BUILT_IN_SYNC_ADD_AND_FETCH_4: - case BUILT_IN_SYNC_ADD_AND_FETCH_8: - case BUILT_IN_SYNC_ADD_AND_FETCH_16: - mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_ADD_AND_FETCH_1); - target = expand_builtin_sync_operation (mode, exp, PLUS, true, target); - if (target) - return target; - break; - - case BUILT_IN_SYNC_SUB_AND_FETCH_1: - case BUILT_IN_SYNC_SUB_AND_FETCH_2: - case BUILT_IN_SYNC_SUB_AND_FETCH_4: - case BUILT_IN_SYNC_SUB_AND_FETCH_8: - case BUILT_IN_SYNC_SUB_AND_FETCH_16: - mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_SUB_AND_FETCH_1); - target = expand_builtin_sync_operation (mode, exp, MINUS, true, target); - if (target) - return target; - break; - - case BUILT_IN_SYNC_OR_AND_FETCH_1: - case BUILT_IN_SYNC_OR_AND_FETCH_2: - case BUILT_IN_SYNC_OR_AND_FETCH_4: - case BUILT_IN_SYNC_OR_AND_FETCH_8: - case BUILT_IN_SYNC_OR_AND_FETCH_16: - mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_OR_AND_FETCH_1); - target = expand_builtin_sync_operation (mode, exp, IOR, true, target); - if (target) - return target; - break; - - case BUILT_IN_SYNC_AND_AND_FETCH_1: - case BUILT_IN_SYNC_AND_AND_FETCH_2: - case BUILT_IN_SYNC_AND_AND_FETCH_4: - case BUILT_IN_SYNC_AND_AND_FETCH_8: - case BUILT_IN_SYNC_AND_AND_FETCH_16: - mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_AND_AND_FETCH_1); - target = expand_builtin_sync_operation (mode, exp, AND, true, target); - if (target) - return target; - break; - - case BUILT_IN_SYNC_XOR_AND_FETCH_1: - case BUILT_IN_SYNC_XOR_AND_FETCH_2: - case BUILT_IN_SYNC_XOR_AND_FETCH_4: - case BUILT_IN_SYNC_XOR_AND_FETCH_8: - case BUILT_IN_SYNC_XOR_AND_FETCH_16: - mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_XOR_AND_FETCH_1); - target = expand_builtin_sync_operation (mode, exp, XOR, true, target); - if (target) - return target; - break; - - case BUILT_IN_SYNC_NAND_AND_FETCH_1: - case BUILT_IN_SYNC_NAND_AND_FETCH_2: - case BUILT_IN_SYNC_NAND_AND_FETCH_4: - case BUILT_IN_SYNC_NAND_AND_FETCH_8: - case BUILT_IN_SYNC_NAND_AND_FETCH_16: - mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_NAND_AND_FETCH_1); - target = expand_builtin_sync_operation (mode, exp, NOT, true, target); - if (target) - return target; - break; - - case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_1: - case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_2: - case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_4: - case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_8: - 
case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_16:
- if (mode == VOIDmode)
- mode = TYPE_MODE (boolean_type_node);
- if (!target || !register_operand (target, mode))
- target = gen_reg_rtx (mode);
-
- mode = get_builtin_sync_mode
- (fcode - BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_1);
- target = expand_builtin_compare_and_swap (mode, exp, true, target);
- if (target)
- return target;
- break;
-
- case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_1:
- case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_2:
- case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_4:
- case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_8:
- case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_16:
- mode = get_builtin_sync_mode
- (fcode - BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_1);
- target = expand_builtin_compare_and_swap (mode, exp, false, target);
- if (target)
- return target;
- break;
-
- case BUILT_IN_SYNC_LOCK_TEST_AND_SET_1:
- case BUILT_IN_SYNC_LOCK_TEST_AND_SET_2:
- case BUILT_IN_SYNC_LOCK_TEST_AND_SET_4:
- case BUILT_IN_SYNC_LOCK_TEST_AND_SET_8:
- case BUILT_IN_SYNC_LOCK_TEST_AND_SET_16:
- mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_LOCK_TEST_AND_SET_1);
- target = expand_builtin_sync_lock_test_and_set (mode, exp, target);
- if (target)
- return target;
- break;
-
- case BUILT_IN_SYNC_LOCK_RELEASE_1:
- case BUILT_IN_SYNC_LOCK_RELEASE_2:
- case BUILT_IN_SYNC_LOCK_RELEASE_4:
- case BUILT_IN_SYNC_LOCK_RELEASE_8:
- case BUILT_IN_SYNC_LOCK_RELEASE_16:
- mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_LOCK_RELEASE_1);
- expand_builtin_sync_lock_release (mode, exp);
- return const0_rtx;
-
- case BUILT_IN_SYNC_SYNCHRONIZE:
- expand_builtin_sync_synchronize ();
- return const0_rtx;
-
- case BUILT_IN_ATOMIC_EXCHANGE_1:
- case BUILT_IN_ATOMIC_EXCHANGE_2:
- case BUILT_IN_ATOMIC_EXCHANGE_4:
- case BUILT_IN_ATOMIC_EXCHANGE_8:
- case BUILT_IN_ATOMIC_EXCHANGE_16:
- mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_EXCHANGE_1);
- target = expand_builtin_atomic_exchange (mode, exp, target);
- if (target)
- return target;
- break;
-
- case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_1:
- case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_2:
- case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_4:
- case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_8:
- case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_16:
- {
- unsigned int nargs, z;
- vec<tree, va_gc> *vec;
-
- mode =
- get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_COMPARE_EXCHANGE_1);
- target = expand_builtin_atomic_compare_exchange (mode, exp, target);
- if (target)
- return target;
-
- /* If this is turned into an external library call, the weak parameter
- must be dropped to match the expected parameter list. */
- nargs = call_expr_nargs (exp);
- vec_alloc (vec, nargs - 1);
- for (z = 0; z < 3; z++)
- vec->quick_push (CALL_EXPR_ARG (exp, z));
- /* Skip the boolean weak parameter.
*/ - for (z = 4; z < 6; z++) - vec->quick_push (CALL_EXPR_ARG (exp, z)); - exp = build_call_vec (TREE_TYPE (exp), CALL_EXPR_FN (exp), vec); - break; - } - - case BUILT_IN_ATOMIC_LOAD_1: - case BUILT_IN_ATOMIC_LOAD_2: - case BUILT_IN_ATOMIC_LOAD_4: - case BUILT_IN_ATOMIC_LOAD_8: - case BUILT_IN_ATOMIC_LOAD_16: - mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_LOAD_1); - target = expand_builtin_atomic_load (mode, exp, target); - if (target) - return target; - break; - - case BUILT_IN_ATOMIC_STORE_1: - case BUILT_IN_ATOMIC_STORE_2: - case BUILT_IN_ATOMIC_STORE_4: - case BUILT_IN_ATOMIC_STORE_8: - case BUILT_IN_ATOMIC_STORE_16: - mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_STORE_1); - target = expand_builtin_atomic_store (mode, exp); - if (target) - return const0_rtx; - break; - - case BUILT_IN_ATOMIC_ADD_FETCH_1: - case BUILT_IN_ATOMIC_ADD_FETCH_2: - case BUILT_IN_ATOMIC_ADD_FETCH_4: - case BUILT_IN_ATOMIC_ADD_FETCH_8: - case BUILT_IN_ATOMIC_ADD_FETCH_16: - { - enum built_in_function lib; - mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_ADD_FETCH_1); - lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_ADD_1 + - (fcode - BUILT_IN_ATOMIC_ADD_FETCH_1)); - target = expand_builtin_atomic_fetch_op (mode, exp, target, PLUS, true, - ignore, lib); - if (target) - return target; - break; - } - case BUILT_IN_ATOMIC_SUB_FETCH_1: - case BUILT_IN_ATOMIC_SUB_FETCH_2: - case BUILT_IN_ATOMIC_SUB_FETCH_4: - case BUILT_IN_ATOMIC_SUB_FETCH_8: - case BUILT_IN_ATOMIC_SUB_FETCH_16: - { - enum built_in_function lib; - mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_SUB_FETCH_1); - lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_SUB_1 + - (fcode - BUILT_IN_ATOMIC_SUB_FETCH_1)); - target = expand_builtin_atomic_fetch_op (mode, exp, target, MINUS, true, - ignore, lib); - if (target) - return target; - break; - } - case BUILT_IN_ATOMIC_AND_FETCH_1: - case BUILT_IN_ATOMIC_AND_FETCH_2: - case BUILT_IN_ATOMIC_AND_FETCH_4: - case BUILT_IN_ATOMIC_AND_FETCH_8: - case BUILT_IN_ATOMIC_AND_FETCH_16: - { - enum built_in_function lib; - mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_AND_FETCH_1); - lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_AND_1 + - (fcode - BUILT_IN_ATOMIC_AND_FETCH_1)); - target = expand_builtin_atomic_fetch_op (mode, exp, target, AND, true, - ignore, lib); - if (target) - return target; - break; - } - case BUILT_IN_ATOMIC_NAND_FETCH_1: - case BUILT_IN_ATOMIC_NAND_FETCH_2: - case BUILT_IN_ATOMIC_NAND_FETCH_4: - case BUILT_IN_ATOMIC_NAND_FETCH_8: - case BUILT_IN_ATOMIC_NAND_FETCH_16: - { - enum built_in_function lib; - mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_NAND_FETCH_1); - lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_NAND_1 + - (fcode - BUILT_IN_ATOMIC_NAND_FETCH_1)); - target = expand_builtin_atomic_fetch_op (mode, exp, target, NOT, true, - ignore, lib); - if (target) - return target; - break; - } - case BUILT_IN_ATOMIC_XOR_FETCH_1: - case BUILT_IN_ATOMIC_XOR_FETCH_2: - case BUILT_IN_ATOMIC_XOR_FETCH_4: - case BUILT_IN_ATOMIC_XOR_FETCH_8: - case BUILT_IN_ATOMIC_XOR_FETCH_16: - { - enum built_in_function lib; - mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_XOR_FETCH_1); - lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_XOR_1 + - (fcode - BUILT_IN_ATOMIC_XOR_FETCH_1)); - target = expand_builtin_atomic_fetch_op (mode, exp, target, XOR, true, - ignore, lib); - if (target) - return target; - break; - } - case BUILT_IN_ATOMIC_OR_FETCH_1: - case BUILT_IN_ATOMIC_OR_FETCH_2: - case 
BUILT_IN_ATOMIC_OR_FETCH_4: - case BUILT_IN_ATOMIC_OR_FETCH_8: - case BUILT_IN_ATOMIC_OR_FETCH_16: - { - enum built_in_function lib; - mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_OR_FETCH_1); - lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_OR_1 + - (fcode - BUILT_IN_ATOMIC_OR_FETCH_1)); - target = expand_builtin_atomic_fetch_op (mode, exp, target, IOR, true, - ignore, lib); - if (target) - return target; - break; - } - case BUILT_IN_ATOMIC_FETCH_ADD_1: - case BUILT_IN_ATOMIC_FETCH_ADD_2: - case BUILT_IN_ATOMIC_FETCH_ADD_4: - case BUILT_IN_ATOMIC_FETCH_ADD_8: - case BUILT_IN_ATOMIC_FETCH_ADD_16: - mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_FETCH_ADD_1); - target = expand_builtin_atomic_fetch_op (mode, exp, target, PLUS, false, - ignore, BUILT_IN_NONE); - if (target) - return target; - break; - - case BUILT_IN_ATOMIC_FETCH_SUB_1: - case BUILT_IN_ATOMIC_FETCH_SUB_2: - case BUILT_IN_ATOMIC_FETCH_SUB_4: - case BUILT_IN_ATOMIC_FETCH_SUB_8: - case BUILT_IN_ATOMIC_FETCH_SUB_16: - mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_FETCH_SUB_1); - target = expand_builtin_atomic_fetch_op (mode, exp, target, MINUS, false, - ignore, BUILT_IN_NONE); - if (target) - return target; - break; - - case BUILT_IN_ATOMIC_FETCH_AND_1: - case BUILT_IN_ATOMIC_FETCH_AND_2: - case BUILT_IN_ATOMIC_FETCH_AND_4: - case BUILT_IN_ATOMIC_FETCH_AND_8: - case BUILT_IN_ATOMIC_FETCH_AND_16: - mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_FETCH_AND_1); - target = expand_builtin_atomic_fetch_op (mode, exp, target, AND, false, - ignore, BUILT_IN_NONE); - if (target) - return target; - break; - - case BUILT_IN_ATOMIC_FETCH_NAND_1: - case BUILT_IN_ATOMIC_FETCH_NAND_2: - case BUILT_IN_ATOMIC_FETCH_NAND_4: - case BUILT_IN_ATOMIC_FETCH_NAND_8: - case BUILT_IN_ATOMIC_FETCH_NAND_16: - mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_FETCH_NAND_1); - target = expand_builtin_atomic_fetch_op (mode, exp, target, NOT, false, - ignore, BUILT_IN_NONE); - if (target) - return target; - break; - - case BUILT_IN_ATOMIC_FETCH_XOR_1: - case BUILT_IN_ATOMIC_FETCH_XOR_2: - case BUILT_IN_ATOMIC_FETCH_XOR_4: - case BUILT_IN_ATOMIC_FETCH_XOR_8: - case BUILT_IN_ATOMIC_FETCH_XOR_16: - mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_FETCH_XOR_1); - target = expand_builtin_atomic_fetch_op (mode, exp, target, XOR, false, - ignore, BUILT_IN_NONE); - if (target) - return target; - break; - - case BUILT_IN_ATOMIC_FETCH_OR_1: - case BUILT_IN_ATOMIC_FETCH_OR_2: - case BUILT_IN_ATOMIC_FETCH_OR_4: - case BUILT_IN_ATOMIC_FETCH_OR_8: - case BUILT_IN_ATOMIC_FETCH_OR_16: - mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_FETCH_OR_1); - target = expand_builtin_atomic_fetch_op (mode, exp, target, IOR, false, - ignore, BUILT_IN_NONE); - if (target) - return target; - break; - - case BUILT_IN_ATOMIC_TEST_AND_SET: - return expand_builtin_atomic_test_and_set (exp, target); - - case BUILT_IN_ATOMIC_CLEAR: - return expand_builtin_atomic_clear (exp); - - case BUILT_IN_ATOMIC_ALWAYS_LOCK_FREE: - return expand_builtin_atomic_always_lock_free (exp); - - case BUILT_IN_ATOMIC_IS_LOCK_FREE: - target = expand_builtin_atomic_is_lock_free (exp); - if (target) - return target; - break; - - case BUILT_IN_ATOMIC_THREAD_FENCE: - expand_builtin_atomic_thread_fence (exp); - return const0_rtx; - - case BUILT_IN_ATOMIC_SIGNAL_FENCE: - expand_builtin_atomic_signal_fence (exp); - return const0_rtx; - - case BUILT_IN_OBJECT_SIZE: - return expand_builtin_object_size (exp); - - case BUILT_IN_MEMCPY_CHK: - case BUILT_IN_MEMPCPY_CHK: - case 
BUILT_IN_MEMMOVE_CHK:
-    case BUILT_IN_MEMSET_CHK:
-      target = expand_builtin_memory_chk (exp, target, mode, fcode);
-      if (target)
-        return target;
-      break;
-
-    case BUILT_IN_STRCPY_CHK:
-    case BUILT_IN_STPCPY_CHK:
-    case BUILT_IN_STRNCPY_CHK:
-    case BUILT_IN_STPNCPY_CHK:
-    case BUILT_IN_STRCAT_CHK:
-    case BUILT_IN_STRNCAT_CHK:
-    case BUILT_IN_SNPRINTF_CHK:
-    case BUILT_IN_VSNPRINTF_CHK:
-      maybe_emit_chk_warning (exp, fcode);
-      break;
-
-    case BUILT_IN_SPRINTF_CHK:
-    case BUILT_IN_VSPRINTF_CHK:
-      maybe_emit_sprintf_chk_warning (exp, fcode);
-      break;
-
-    case BUILT_IN_THREAD_POINTER:
-      return expand_builtin_thread_pointer (exp, target);
-
-    case BUILT_IN_SET_THREAD_POINTER:
-      expand_builtin_set_thread_pointer (exp);
-      return const0_rtx;
-
-    case BUILT_IN_ACC_ON_DEVICE:
-      /* Do library call, if we failed to expand the builtin when
-         folding.  */
-      break;
-
-    case BUILT_IN_GOACC_PARLEVEL_ID:
-    case BUILT_IN_GOACC_PARLEVEL_SIZE:
-      return expand_builtin_goacc_parlevel_id_size (exp, target, ignore);
-
-    case BUILT_IN_SPECULATION_SAFE_VALUE_PTR:
-      return expand_speculation_safe_value (VOIDmode, exp, target, ignore);
-
-    case BUILT_IN_SPECULATION_SAFE_VALUE_1:
-    case BUILT_IN_SPECULATION_SAFE_VALUE_2:
-    case BUILT_IN_SPECULATION_SAFE_VALUE_4:
-    case BUILT_IN_SPECULATION_SAFE_VALUE_8:
-    case BUILT_IN_SPECULATION_SAFE_VALUE_16:
-      mode = get_builtin_sync_mode (fcode - BUILT_IN_SPECULATION_SAFE_VALUE_1);
-      return expand_speculation_safe_value (mode, exp, target, ignore);
-
-    default:	/* just do library call, if unknown builtin */
-      break;
-    }
-
-  /* The switch statement above can drop through to cause the function
-     to be called normally.  */
-  return expand_call (exp, target, ignore);
-}
-
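The fall-through at the end of expand_builtin is the generic escape hatch: any
builtin that no case manages to expand inline is handed to expand_call and
emitted as an ordinary call to the like-named library function.  A minimal
sketch of the user-visible effect (the function `copy' and its body are
editorial illustrations, not part of the patch):

    #include <string.h>

    void
    copy (void *dst, const void *src, size_t n)
    {
      /* With a run-time N, a target may decline to expand the builtin
         inline; expand_builtin then drops through and a plain call to
         memcpy is emitted.  */
      __builtin_memcpy (dst, src, n);
    }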
-/* Determine whether a tree node represents a call to a built-in
-   function.  If the tree T is a call to a built-in function with
-   the right number of arguments of the appropriate types, return
-   the DECL_FUNCTION_CODE of the call, e.g. BUILT_IN_SQRT.
-   Otherwise the return value is END_BUILTINS.  */
-
-enum built_in_function
-builtin_mathfn_code (const_tree t)
-{
-  const_tree fndecl, arg, parmlist;
-  const_tree argtype, parmtype;
-  const_call_expr_arg_iterator iter;
-
-  if (TREE_CODE (t) != CALL_EXPR)
-    return END_BUILTINS;
-
-  fndecl = get_callee_fndecl (t);
-  if (fndecl == NULL_TREE || !fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
-    return END_BUILTINS;
-
-  parmlist = TYPE_ARG_TYPES (TREE_TYPE (fndecl));
-  init_const_call_expr_arg_iterator (t, &iter);
-  for (; parmlist; parmlist = TREE_CHAIN (parmlist))
-    {
-      /* If a function doesn't take a variable number of arguments,
-         the last element in the list will have type `void'.  */
-      parmtype = TREE_VALUE (parmlist);
-      if (VOID_TYPE_P (parmtype))
-        {
-          if (more_const_call_expr_args_p (&iter))
-            return END_BUILTINS;
-          return DECL_FUNCTION_CODE (fndecl);
-        }
-
-      if (! more_const_call_expr_args_p (&iter))
-        return END_BUILTINS;
-
-      arg = next_const_call_expr_arg (&iter);
-      argtype = TREE_TYPE (arg);
-
-      if (SCALAR_FLOAT_TYPE_P (parmtype))
-        {
-          if (! SCALAR_FLOAT_TYPE_P (argtype))
-            return END_BUILTINS;
-        }
-      else if (COMPLEX_FLOAT_TYPE_P (parmtype))
-        {
-          if (! COMPLEX_FLOAT_TYPE_P (argtype))
-            return END_BUILTINS;
-        }
-      else if (POINTER_TYPE_P (parmtype))
-        {
-          if (! POINTER_TYPE_P (argtype))
-            return END_BUILTINS;
-        }
-      else if (INTEGRAL_TYPE_P (parmtype))
-        {
-          if (! INTEGRAL_TYPE_P (argtype))
-            return END_BUILTINS;
-        }
-      else
-        return END_BUILTINS;
-    }
-
-  /* Variable-length argument list.  */
-  return DECL_FUNCTION_CODE (fndecl);
-}
-
-/* Fold a call to __builtin_constant_p, if we know its argument ARG will
-   evaluate to a constant.  */
-
-static tree
-fold_builtin_constant_p (tree arg)
-{
-  /* We return 1 for a numeric type that's known to be a constant
-     value at compile-time or for an aggregate type that's a
-     literal constant.  */
-  STRIP_NOPS (arg);
-
-  /* If we know this is a constant, emit the constant of one.  */
-  if (CONSTANT_CLASS_P (arg)
-      || (TREE_CODE (arg) == CONSTRUCTOR
-          && TREE_CONSTANT (arg)))
-    return integer_one_node;
-  if (TREE_CODE (arg) == ADDR_EXPR)
-    {
-      tree op = TREE_OPERAND (arg, 0);
-      if (TREE_CODE (op) == STRING_CST
-          || (TREE_CODE (op) == ARRAY_REF
-              && integer_zerop (TREE_OPERAND (op, 1))
-              && TREE_CODE (TREE_OPERAND (op, 0)) == STRING_CST))
-        return integer_one_node;
-    }
-
-  /* If this expression has side effects, show we don't know it to be a
-     constant.  Likewise if it's a pointer or aggregate type since in
-     those cases we only want literals, since those are only optimized
-     when generating RTL, not later.
-     And finally, if we are compiling an initializer, not code, we
-     need to return a definite result now; there's not going to be any
-     more optimization done.  */
-  if (TREE_SIDE_EFFECTS (arg)
-      || AGGREGATE_TYPE_P (TREE_TYPE (arg))
-      || POINTER_TYPE_P (TREE_TYPE (arg))
-      || cfun == 0
-      || folding_initializer
-      || force_folding_builtin_constant_p)
-    return integer_zero_node;
-
-  return NULL_TREE;
-}
-
-/* Create builtin_expect or builtin_expect_with_probability
-   with PRED and EXPECTED as its arguments and return it as a truthvalue.
-   Fortran FE can also produce builtin_expect with PREDICTOR as third argument.
-   builtin_expect_with_probability instead uses third argument as PROBABILITY
-   value.  */
-
-static tree
-build_builtin_expect_predicate (location_t loc, tree pred, tree expected,
-                                tree predictor, tree probability)
-{
-  tree fn, arg_types, pred_type, expected_type, call_expr, ret_type;
-
-  fn = builtin_decl_explicit (probability == NULL_TREE ? BUILT_IN_EXPECT
-                              : BUILT_IN_EXPECT_WITH_PROBABILITY);
-  arg_types = TYPE_ARG_TYPES (TREE_TYPE (fn));
-  ret_type = TREE_TYPE (TREE_TYPE (fn));
-  pred_type = TREE_VALUE (arg_types);
-  expected_type = TREE_VALUE (TREE_CHAIN (arg_types));
-
-  pred = fold_convert_loc (loc, pred_type, pred);
-  expected = fold_convert_loc (loc, expected_type, expected);
-
-  if (probability)
-    call_expr = build_call_expr_loc (loc, fn, 3, pred, expected, probability);
-  else
-    call_expr = build_call_expr_loc (loc, fn, predictor ? 3 : 2, pred, expected,
-                                     predictor);
-
-  return build2 (NE_EXPR, TREE_TYPE (pred), call_expr,
-                 build_int_cst (ret_type, 0));
-}
-
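build_builtin_expect_predicate wraps PRED in a fresh __builtin_expect (or
__builtin_expect_with_probability) call and compares the result against zero;
fold_builtin_expect, next, uses it to push an expectation onto both operands
of && and ||.  A conceptual, source-level sketch of that distribution (the
rewritten form in the comment is approximate, not GCC's literal output, and
the function name is illustrative):

    int
    likely_both (int a, int b)
    {
      /* Folded roughly as if it had been written
         (__builtin_expect (a != 0, 1) != 0)
         && (__builtin_expect (b != 0, 1) != 0).  */
      return __builtin_expect (a && b, 1) != 0;
    }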
-/* Fold a call to builtin_expect with arguments ARG0, ARG1, ARG2, ARG3.  Return
-   NULL_TREE if no simplification is possible.  */
-
-tree
-fold_builtin_expect (location_t loc, tree arg0, tree arg1, tree arg2,
-                     tree arg3)
-{
-  tree inner, fndecl, inner_arg0;
-  enum tree_code code;
-
-  /* Distribute the expected value over short-circuiting operators.
-     See through the cast from truthvalue_type_node to long.  */
-  inner_arg0 = arg0;
-  while (CONVERT_EXPR_P (inner_arg0)
-         && INTEGRAL_TYPE_P (TREE_TYPE (inner_arg0))
-         && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (inner_arg0, 0))))
-    inner_arg0 = TREE_OPERAND (inner_arg0, 0);
-
-  /* If this is a builtin_expect within a builtin_expect keep the
-     inner one.  See through a comparison against a constant.  It
-     might have been added to create a truthvalue.  */
-  inner = inner_arg0;
-
-  if (COMPARISON_CLASS_P (inner)
-      && TREE_CODE (TREE_OPERAND (inner, 1)) == INTEGER_CST)
-    inner = TREE_OPERAND (inner, 0);
-
-  if (TREE_CODE (inner) == CALL_EXPR
-      && (fndecl = get_callee_fndecl (inner))
-      && (fndecl_built_in_p (fndecl, BUILT_IN_EXPECT)
-          || fndecl_built_in_p (fndecl, BUILT_IN_EXPECT_WITH_PROBABILITY)))
-    return arg0;
-
-  inner = inner_arg0;
-  code = TREE_CODE (inner);
-  if (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR)
-    {
-      tree op0 = TREE_OPERAND (inner, 0);
-      tree op1 = TREE_OPERAND (inner, 1);
-      arg1 = save_expr (arg1);
-
-      op0 = build_builtin_expect_predicate (loc, op0, arg1, arg2, arg3);
-      op1 = build_builtin_expect_predicate (loc, op1, arg1, arg2, arg3);
-      inner = build2 (code, TREE_TYPE (inner), op0, op1);
-
-      return fold_convert_loc (loc, TREE_TYPE (arg0), inner);
-    }
-
-  /* If the argument isn't invariant then there's nothing else we can do.  */
-  if (!TREE_CONSTANT (inner_arg0))
-    return NULL_TREE;
-
-  /* If we expect that a comparison against the argument will fold to
-     a constant return the constant.  In practice, this means a true
-     constant or the address of a non-weak symbol.  */
-  inner = inner_arg0;
-  STRIP_NOPS (inner);
-  if (TREE_CODE (inner) == ADDR_EXPR)
-    {
-      do
-        {
-          inner = TREE_OPERAND (inner, 0);
-        }
-      while (TREE_CODE (inner) == COMPONENT_REF
-             || TREE_CODE (inner) == ARRAY_REF);
-      if (VAR_OR_FUNCTION_DECL_P (inner) && DECL_WEAK (inner))
-        return NULL_TREE;
-    }
-
-  /* Otherwise, ARG0 already has the proper type for the return value.  */
-  return arg0;
-}
-
-/* Fold a call to __builtin_classify_type with argument ARG.  */
-
-static tree
-fold_builtin_classify_type (tree arg)
-{
-  if (arg == 0)
-    return build_int_cst (integer_type_node, no_type_class);
-
-  return build_int_cst (integer_type_node, type_to_class (TREE_TYPE (arg)));
-}
-
-/* Fold a call EXPR (which may be null) to __builtin_strlen with argument
-   ARG.  */
-
-static tree
-fold_builtin_strlen (location_t loc, tree expr, tree type, tree arg)
-{
-  if (!validate_arg (arg, POINTER_TYPE))
-    return NULL_TREE;
-  else
-    {
-      c_strlen_data lendata = { };
-      tree len = c_strlen (arg, 0, &lendata);
-
-      if (len)
-        return fold_convert_loc (loc, type, len);
-
-      if (!lendata.decl)
-        c_strlen (arg, 1, &lendata);
-
-      if (lendata.decl)
-        {
-          if (EXPR_HAS_LOCATION (arg))
-            loc = EXPR_LOCATION (arg);
-          else if (loc == UNKNOWN_LOCATION)
-            loc = input_location;
-          warn_string_no_nul (loc, expr, "strlen", arg, lendata.decl);
-        }
-
-      return NULL_TREE;
-    }
-}
-
-/* Fold a call to __builtin_inf or __builtin_huge_val.  */
-
-static tree
-fold_builtin_inf (location_t loc, tree type, int warn)
-{
-  REAL_VALUE_TYPE real;
-
-  /* __builtin_inff is intended to be usable to define INFINITY on all
-     targets.  If an infinity is not available, INFINITY expands "to a
-     positive constant of type float that overflows at translation
-     time", footnote "In this case, using INFINITY will violate the
-     constraint in 6.4.4 and thus require a diagnostic." (C99 7.12#4).
-     Thus we pedwarn to ensure this constraint violation is
-     diagnosed.  */
-  if (!MODE_HAS_INFINITIES (TYPE_MODE (type)) && warn)
-    pedwarn (loc, 0, "target format does not support infinity");
-
-  real_inf (&real);
-  return build_real (type, real);
-}
-
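fold_builtin_strlen has two user-visible outcomes: a strlen whose length
c_strlen can compute folds to an integer constant, and an argument that
provably lacks a terminating nul may be diagnosed through warn_string_no_nul.
A sketch of both (function and array names are editorial, not from the patch):

    #include <string.h>

    size_t
    folded (void)
    {
      return strlen ("hello");   /* c_strlen folds this to 5 */
    }

    char no_nul[3] = { 'a', 'b', 'c' };

    size_t
    diagnosed (void)
    {
      /* Not folded; c_strlen instead identifies NO_NUL as lacking a
         terminating nul, and warn_string_no_nul may report it.  */
      return strlen (no_nul);
    }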
-/* Fold function call to builtin sincos, sincosf, or sincosl.  Return
-   NULL_TREE if no simplification can be made.  */
-
-static tree
-fold_builtin_sincos (location_t loc,
-                     tree arg0, tree arg1, tree arg2)
-{
-  tree type;
-  tree fndecl, call = NULL_TREE;
-
-  if (!validate_arg (arg0, REAL_TYPE)
-      || !validate_arg (arg1, POINTER_TYPE)
-      || !validate_arg (arg2, POINTER_TYPE))
-    return NULL_TREE;
-
-  type = TREE_TYPE (arg0);
-
-  /* Calculate the result when the argument is a constant.  */
-  built_in_function fn = mathfn_built_in_2 (type, CFN_BUILT_IN_CEXPI);
-  if (fn == END_BUILTINS)
-    return NULL_TREE;
-
-  /* Canonicalize sincos to cexpi.  */
-  if (TREE_CODE (arg0) == REAL_CST)
-    {
-      tree complex_type = build_complex_type (type);
-      call = fold_const_call (as_combined_fn (fn), complex_type, arg0);
-    }
-  if (!call)
-    {
-      if (!targetm.libc_has_function (function_c99_math_complex, type)
-          || !builtin_decl_implicit_p (fn))
-        return NULL_TREE;
-      fndecl = builtin_decl_explicit (fn);
-      call = build_call_expr_loc (loc, fndecl, 1, arg0);
-      call = builtin_save_expr (call);
-    }
-
-  tree ptype = build_pointer_type (type);
-  arg1 = fold_convert (ptype, arg1);
-  arg2 = fold_convert (ptype, arg2);
-  return build2 (COMPOUND_EXPR, void_type_node,
-                 build2 (MODIFY_EXPR, void_type_node,
-                         build_fold_indirect_ref_loc (loc, arg1),
-                         fold_build1_loc (loc, IMAGPART_EXPR, type, call)),
-                 build2 (MODIFY_EXPR, void_type_node,
-                         build_fold_indirect_ref_loc (loc, arg2),
-                         fold_build1_loc (loc, REALPART_EXPR, type, call)));
-}
-
-/* Fold function call to builtin memcmp with arguments ARG1 and ARG2.
-   Return NULL_TREE if no simplification can be made.  */
-
-static tree
-fold_builtin_memcmp (location_t loc, tree arg1, tree arg2, tree len)
-{
-  if (!validate_arg (arg1, POINTER_TYPE)
-      || !validate_arg (arg2, POINTER_TYPE)
-      || !validate_arg (len, INTEGER_TYPE))
-    return NULL_TREE;
-
-  /* If the LEN parameter is zero, return zero.  */
-  if (integer_zerop (len))
-    return omit_two_operands_loc (loc, integer_type_node, integer_zero_node,
-                                  arg1, arg2);
-
-  /* If ARG1 and ARG2 are the same (and not volatile), return zero.  */
-  if (operand_equal_p (arg1, arg2, 0))
-    return omit_one_operand_loc (loc, integer_type_node, integer_zero_node, len);
-
-  /* If len parameter is one, return an expression corresponding to
-     (*(const unsigned char*)arg1 - (const unsigned char*)arg2).  */
-  if (tree_fits_uhwi_p (len) && tree_to_uhwi (len) == 1)
-    {
-      tree cst_uchar_node = build_type_variant (unsigned_char_type_node, 1, 0);
-      tree cst_uchar_ptr_node
-        = build_pointer_type_for_mode (cst_uchar_node, ptr_mode, true);
-
-      tree ind1
-        = fold_convert_loc (loc, integer_type_node,
-                            build1 (INDIRECT_REF, cst_uchar_node,
-                                    fold_convert_loc (loc,
-                                                      cst_uchar_ptr_node,
-                                                      arg1)));
-      tree ind2
-        = fold_convert_loc (loc, integer_type_node,
-                            build1 (INDIRECT_REF, cst_uchar_node,
-                                    fold_convert_loc (loc,
-                                                      cst_uchar_ptr_node,
-                                                      arg2)));
-      return fold_build2_loc (loc, MINUS_EXPR, integer_type_node, ind1, ind2);
-    }
-
-  return NULL_TREE;
-}
-
-/* Fold a call to builtin isascii with argument ARG.  */
-
-static tree
-fold_builtin_isascii (location_t loc, tree arg)
-{
-  if (!validate_arg (arg, INTEGER_TYPE))
-    return NULL_TREE;
-  else
-    {
-      /* Transform isascii(c) -> ((c & ~0x7f) == 0).  */
-      arg = fold_build2 (BIT_AND_EXPR, integer_type_node, arg,
-                         build_int_cst (integer_type_node,
-                                        ~ (unsigned HOST_WIDE_INT) 0x7f));
-      return fold_build2_loc (loc, EQ_EXPR, integer_type_node,
-                              arg, integer_zero_node);
-    }
-}
-
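The memcmp and isascii folds above have simple source-level equivalents,
stated in their comments.  Written out by hand (a sketch of the resulting
semantics, not the literal trees GCC builds; function names are editorial):

    int
    cmp1 (const void *p, const void *q)
    {
      /* memcmp (p, q, 1) folds to the difference of the first bytes,
         each read through a const unsigned char pointer.  */
      return *(const unsigned char *) p - *(const unsigned char *) q;
    }

    int
    ascii_p (int c)
    {
      /* isascii (c) folds to ((c & ~0x7f) == 0).  */
      return (c & ~0x7f) == 0;
    }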
-/* Fold a call to builtin toascii with argument ARG.  */
-
-static tree
-fold_builtin_toascii (location_t loc, tree arg)
-{
-  if (!validate_arg (arg, INTEGER_TYPE))
-    return NULL_TREE;
-
-  /* Transform toascii(c) -> (c & 0x7f).  */
-  return fold_build2_loc (loc, BIT_AND_EXPR, integer_type_node, arg,
-                          build_int_cst (integer_type_node, 0x7f));
-}
-
-/* Fold a call to builtin isdigit with argument ARG.  */
-
-static tree
-fold_builtin_isdigit (location_t loc, tree arg)
-{
-  if (!validate_arg (arg, INTEGER_TYPE))
-    return NULL_TREE;
-  else
-    {
-      /* Transform isdigit(c) -> (unsigned)(c) - '0' <= 9.  */
-      /* According to the C standard, isdigit is unaffected by locale.
-         However, it definitely is affected by the target character set.  */
-      unsigned HOST_WIDE_INT target_digit0
-        = lang_hooks.to_target_charset ('0');
-
-      if (target_digit0 == 0)
-        return NULL_TREE;
-
-      arg = fold_convert_loc (loc, unsigned_type_node, arg);
-      arg = fold_build2 (MINUS_EXPR, unsigned_type_node, arg,
-                         build_int_cst (unsigned_type_node, target_digit0));
-      return fold_build2_loc (loc, LE_EXPR, integer_type_node, arg,
-                              build_int_cst (unsigned_type_node, 9));
-    }
-}
-
-/* Fold a call to fabs, fabsf or fabsl with argument ARG.  */
-
-static tree
-fold_builtin_fabs (location_t loc, tree arg, tree type)
-{
-  if (!validate_arg (arg, REAL_TYPE))
-    return NULL_TREE;
-
-  arg = fold_convert_loc (loc, type, arg);
-  return fold_build1_loc (loc, ABS_EXPR, type, arg);
-}
-
-/* Fold a call to abs, labs, llabs or imaxabs with argument ARG.  */
-
-static tree
-fold_builtin_abs (location_t loc, tree arg, tree type)
-{
-  if (!validate_arg (arg, INTEGER_TYPE))
-    return NULL_TREE;
-
-  arg = fold_convert_loc (loc, type, arg);
-  return fold_build1_loc (loc, ABS_EXPR, type, arg);
-}
-
-/* Fold a call to builtin carg(a+bi) -> atan2(b,a).  */
-
-static tree
-fold_builtin_carg (location_t loc, tree arg, tree type)
-{
-  if (validate_arg (arg, COMPLEX_TYPE)
-      && TREE_CODE (TREE_TYPE (TREE_TYPE (arg))) == REAL_TYPE)
-    {
-      tree atan2_fn = mathfn_built_in (type, BUILT_IN_ATAN2);
-
-      if (atan2_fn)
-        {
-          tree new_arg = builtin_save_expr (arg);
-          tree r_arg = fold_build1_loc (loc, REALPART_EXPR, type, new_arg);
-          tree i_arg = fold_build1_loc (loc, IMAGPART_EXPR, type, new_arg);
-          return build_call_expr_loc (loc, atan2_fn, 2, i_arg, r_arg);
-        }
-    }
-
-  return NULL_TREE;
-}
-
-/* Fold a call to builtin frexp; we can assume the base is 2.  */
-
-static tree
-fold_builtin_frexp (location_t loc, tree arg0, tree arg1, tree rettype)
-{
-  if (! validate_arg (arg0, REAL_TYPE) || ! validate_arg (arg1, POINTER_TYPE))
-    return NULL_TREE;
-
-  STRIP_NOPS (arg0);
-
-  if (!(TREE_CODE (arg0) == REAL_CST && ! TREE_OVERFLOW (arg0)))
-    return NULL_TREE;
-
-  arg1 = build_fold_indirect_ref_loc (loc, arg1);
-
-  /* Proceed if a valid pointer type was passed in.
*/ - if (TYPE_MAIN_VARIANT (TREE_TYPE (arg1)) == integer_type_node) - { - const REAL_VALUE_TYPE *const value = TREE_REAL_CST_PTR (arg0); - tree frac, exp; - - switch (value->cl) + case BUILT_IN_ATOMIC_ADD_FETCH_1: + case BUILT_IN_ATOMIC_ADD_FETCH_2: + case BUILT_IN_ATOMIC_ADD_FETCH_4: + case BUILT_IN_ATOMIC_ADD_FETCH_8: + case BUILT_IN_ATOMIC_ADD_FETCH_16: + { + enum built_in_function lib; + mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_ADD_FETCH_1); + lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_ADD_1 + + (fcode - BUILT_IN_ATOMIC_ADD_FETCH_1)); + target = expand_builtin_atomic_fetch_op (mode, exp, target, PLUS, true, + ignore, lib); + if (target) + return target; + break; + } + case BUILT_IN_ATOMIC_SUB_FETCH_1: + case BUILT_IN_ATOMIC_SUB_FETCH_2: + case BUILT_IN_ATOMIC_SUB_FETCH_4: + case BUILT_IN_ATOMIC_SUB_FETCH_8: + case BUILT_IN_ATOMIC_SUB_FETCH_16: { - case rvc_zero: - /* For +-0, return (*exp = 0, +-0). */ - exp = integer_zero_node; - frac = arg0; + enum built_in_function lib; + mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_SUB_FETCH_1); + lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_SUB_1 + + (fcode - BUILT_IN_ATOMIC_SUB_FETCH_1)); + target = expand_builtin_atomic_fetch_op (mode, exp, target, MINUS, true, + ignore, lib); + if (target) + return target; break; - case rvc_nan: - case rvc_inf: - /* For +-NaN or +-Inf, *exp is unspecified, return arg0. */ - return omit_one_operand_loc (loc, rettype, arg0, arg1); - case rvc_normal: - { - /* Since the frexp function always expects base 2, and in - GCC normalized significands are already in the range - [0.5, 1.0), we have exactly what frexp wants. */ - REAL_VALUE_TYPE frac_rvt = *value; - SET_REAL_EXP (&frac_rvt, 0); - frac = build_real (rettype, frac_rvt); - exp = build_int_cst (integer_type_node, REAL_EXP (value)); - } + } + case BUILT_IN_ATOMIC_AND_FETCH_1: + case BUILT_IN_ATOMIC_AND_FETCH_2: + case BUILT_IN_ATOMIC_AND_FETCH_4: + case BUILT_IN_ATOMIC_AND_FETCH_8: + case BUILT_IN_ATOMIC_AND_FETCH_16: + { + enum built_in_function lib; + mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_AND_FETCH_1); + lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_AND_1 + + (fcode - BUILT_IN_ATOMIC_AND_FETCH_1)); + target = expand_builtin_atomic_fetch_op (mode, exp, target, AND, true, + ignore, lib); + if (target) + return target; + break; + } + case BUILT_IN_ATOMIC_NAND_FETCH_1: + case BUILT_IN_ATOMIC_NAND_FETCH_2: + case BUILT_IN_ATOMIC_NAND_FETCH_4: + case BUILT_IN_ATOMIC_NAND_FETCH_8: + case BUILT_IN_ATOMIC_NAND_FETCH_16: + { + enum built_in_function lib; + mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_NAND_FETCH_1); + lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_NAND_1 + + (fcode - BUILT_IN_ATOMIC_NAND_FETCH_1)); + target = expand_builtin_atomic_fetch_op (mode, exp, target, NOT, true, + ignore, lib); + if (target) + return target; + break; + } + case BUILT_IN_ATOMIC_XOR_FETCH_1: + case BUILT_IN_ATOMIC_XOR_FETCH_2: + case BUILT_IN_ATOMIC_XOR_FETCH_4: + case BUILT_IN_ATOMIC_XOR_FETCH_8: + case BUILT_IN_ATOMIC_XOR_FETCH_16: + { + enum built_in_function lib; + mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_XOR_FETCH_1); + lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_XOR_1 + + (fcode - BUILT_IN_ATOMIC_XOR_FETCH_1)); + target = expand_builtin_atomic_fetch_op (mode, exp, target, XOR, true, + ignore, lib); + if (target) + return target; + break; + } + case BUILT_IN_ATOMIC_OR_FETCH_1: + case BUILT_IN_ATOMIC_OR_FETCH_2: + case BUILT_IN_ATOMIC_OR_FETCH_4: + case 
BUILT_IN_ATOMIC_OR_FETCH_8: + case BUILT_IN_ATOMIC_OR_FETCH_16: + { + enum built_in_function lib; + mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_OR_FETCH_1); + lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_OR_1 + + (fcode - BUILT_IN_ATOMIC_OR_FETCH_1)); + target = expand_builtin_atomic_fetch_op (mode, exp, target, IOR, true, + ignore, lib); + if (target) + return target; break; - default: - gcc_unreachable (); } + case BUILT_IN_ATOMIC_FETCH_ADD_1: + case BUILT_IN_ATOMIC_FETCH_ADD_2: + case BUILT_IN_ATOMIC_FETCH_ADD_4: + case BUILT_IN_ATOMIC_FETCH_ADD_8: + case BUILT_IN_ATOMIC_FETCH_ADD_16: + mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_FETCH_ADD_1); + target = expand_builtin_atomic_fetch_op (mode, exp, target, PLUS, false, + ignore, BUILT_IN_NONE); + if (target) + return target; + break; + + case BUILT_IN_ATOMIC_FETCH_SUB_1: + case BUILT_IN_ATOMIC_FETCH_SUB_2: + case BUILT_IN_ATOMIC_FETCH_SUB_4: + case BUILT_IN_ATOMIC_FETCH_SUB_8: + case BUILT_IN_ATOMIC_FETCH_SUB_16: + mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_FETCH_SUB_1); + target = expand_builtin_atomic_fetch_op (mode, exp, target, MINUS, false, + ignore, BUILT_IN_NONE); + if (target) + return target; + break; - /* Create the COMPOUND_EXPR (*arg1 = trunc, frac). */ - arg1 = fold_build2_loc (loc, MODIFY_EXPR, rettype, arg1, exp); - TREE_SIDE_EFFECTS (arg1) = 1; - return fold_build2_loc (loc, COMPOUND_EXPR, rettype, arg1, frac); - } + case BUILT_IN_ATOMIC_FETCH_AND_1: + case BUILT_IN_ATOMIC_FETCH_AND_2: + case BUILT_IN_ATOMIC_FETCH_AND_4: + case BUILT_IN_ATOMIC_FETCH_AND_8: + case BUILT_IN_ATOMIC_FETCH_AND_16: + mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_FETCH_AND_1); + target = expand_builtin_atomic_fetch_op (mode, exp, target, AND, false, + ignore, BUILT_IN_NONE); + if (target) + return target; + break; + + case BUILT_IN_ATOMIC_FETCH_NAND_1: + case BUILT_IN_ATOMIC_FETCH_NAND_2: + case BUILT_IN_ATOMIC_FETCH_NAND_4: + case BUILT_IN_ATOMIC_FETCH_NAND_8: + case BUILT_IN_ATOMIC_FETCH_NAND_16: + mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_FETCH_NAND_1); + target = expand_builtin_atomic_fetch_op (mode, exp, target, NOT, false, + ignore, BUILT_IN_NONE); + if (target) + return target; + break; + + case BUILT_IN_ATOMIC_FETCH_XOR_1: + case BUILT_IN_ATOMIC_FETCH_XOR_2: + case BUILT_IN_ATOMIC_FETCH_XOR_4: + case BUILT_IN_ATOMIC_FETCH_XOR_8: + case BUILT_IN_ATOMIC_FETCH_XOR_16: + mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_FETCH_XOR_1); + target = expand_builtin_atomic_fetch_op (mode, exp, target, XOR, false, + ignore, BUILT_IN_NONE); + if (target) + return target; + break; + + case BUILT_IN_ATOMIC_FETCH_OR_1: + case BUILT_IN_ATOMIC_FETCH_OR_2: + case BUILT_IN_ATOMIC_FETCH_OR_4: + case BUILT_IN_ATOMIC_FETCH_OR_8: + case BUILT_IN_ATOMIC_FETCH_OR_16: + mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_FETCH_OR_1); + target = expand_builtin_atomic_fetch_op (mode, exp, target, IOR, false, + ignore, BUILT_IN_NONE); + if (target) + return target; + break; - return NULL_TREE; -} + case BUILT_IN_ATOMIC_TEST_AND_SET: + return expand_builtin_atomic_test_and_set (exp, target); -/* Fold a call to builtin modf. */ + case BUILT_IN_ATOMIC_CLEAR: + return expand_builtin_atomic_clear (exp); + + case BUILT_IN_ATOMIC_ALWAYS_LOCK_FREE: + return expand_builtin_atomic_always_lock_free (exp); -static tree -fold_builtin_modf (location_t loc, tree arg0, tree arg1, tree rettype) -{ - if (! validate_arg (arg0, REAL_TYPE) || ! 
validate_arg (arg1, POINTER_TYPE)) - return NULL_TREE; + case BUILT_IN_ATOMIC_IS_LOCK_FREE: + target = expand_builtin_atomic_is_lock_free (exp); + if (target) + return target; + break; - STRIP_NOPS (arg0); + case BUILT_IN_ATOMIC_THREAD_FENCE: + expand_builtin_atomic_thread_fence (exp); + return const0_rtx; - if (!(TREE_CODE (arg0) == REAL_CST && ! TREE_OVERFLOW (arg0))) - return NULL_TREE; + case BUILT_IN_ATOMIC_SIGNAL_FENCE: + expand_builtin_atomic_signal_fence (exp); + return const0_rtx; - arg1 = build_fold_indirect_ref_loc (loc, arg1); + case BUILT_IN_OBJECT_SIZE: + return expand_builtin_object_size (exp); - /* Proceed if a valid pointer type was passed in. */ - if (TYPE_MAIN_VARIANT (TREE_TYPE (arg1)) == TYPE_MAIN_VARIANT (rettype)) - { - const REAL_VALUE_TYPE *const value = TREE_REAL_CST_PTR (arg0); - REAL_VALUE_TYPE trunc, frac; + case BUILT_IN_MEMCPY_CHK: + case BUILT_IN_MEMPCPY_CHK: + case BUILT_IN_MEMMOVE_CHK: + case BUILT_IN_MEMSET_CHK: + target = expand_builtin_memory_chk (exp, target, mode, fcode); + if (target) + return target; + break; - switch (value->cl) - { - case rvc_nan: - case rvc_zero: - /* For +-NaN or +-0, return (*arg1 = arg0, arg0). */ - trunc = frac = *value; - break; - case rvc_inf: - /* For +-Inf, return (*arg1 = arg0, +-0). */ - frac = dconst0; - frac.sign = value->sign; - trunc = *value; - break; - case rvc_normal: - /* Return (*arg1 = trunc(arg0), arg0-trunc(arg0)). */ - real_trunc (&trunc, VOIDmode, value); - real_arithmetic (&frac, MINUS_EXPR, value, &trunc); - /* If the original number was negative and already - integral, then the fractional part is -0.0. */ - if (value->sign && frac.cl == rvc_zero) - frac.sign = value->sign; - break; - } + case BUILT_IN_STRCPY_CHK: + case BUILT_IN_STPCPY_CHK: + case BUILT_IN_STRNCPY_CHK: + case BUILT_IN_STPNCPY_CHK: + case BUILT_IN_STRCAT_CHK: + case BUILT_IN_STRNCAT_CHK: + case BUILT_IN_SNPRINTF_CHK: + case BUILT_IN_VSNPRINTF_CHK: + maybe_emit_chk_warning (exp, fcode); + break; + + case BUILT_IN_SPRINTF_CHK: + case BUILT_IN_VSPRINTF_CHK: + maybe_emit_sprintf_chk_warning (exp, fcode); + break; - /* Create the COMPOUND_EXPR (*arg1 = trunc, frac). */ - arg1 = fold_build2_loc (loc, MODIFY_EXPR, rettype, arg1, - build_real (rettype, trunc)); - TREE_SIDE_EFFECTS (arg1) = 1; - return fold_build2_loc (loc, COMPOUND_EXPR, rettype, arg1, - build_real (rettype, frac)); + case BUILT_IN_THREAD_POINTER: + return expand_builtin_thread_pointer (exp, target); + + case BUILT_IN_SET_THREAD_POINTER: + expand_builtin_set_thread_pointer (exp); + return const0_rtx; + + case BUILT_IN_ACC_ON_DEVICE: + /* Do library call, if we failed to expand the builtin when + folding. */ + break; + + case BUILT_IN_GOACC_PARLEVEL_ID: + case BUILT_IN_GOACC_PARLEVEL_SIZE: + return expand_builtin_goacc_parlevel_id_size (exp, target, ignore); + + case BUILT_IN_SPECULATION_SAFE_VALUE_PTR: + return expand_speculation_safe_value (VOIDmode, exp, target, ignore); + + case BUILT_IN_SPECULATION_SAFE_VALUE_1: + case BUILT_IN_SPECULATION_SAFE_VALUE_2: + case BUILT_IN_SPECULATION_SAFE_VALUE_4: + case BUILT_IN_SPECULATION_SAFE_VALUE_8: + case BUILT_IN_SPECULATION_SAFE_VALUE_16: + mode = get_builtin_sync_mode (fcode - BUILT_IN_SPECULATION_SAFE_VALUE_1); + return expand_speculation_safe_value (mode, exp, target, ignore); + + default: /* just do library call, if unknown builtin */ + break; } - return NULL_TREE; + /* The switch statement above can drop through to cause the function + to be called normally. 
*/ + return expand_call (exp, target, ignore); } -/* Given a location LOC, an interclass builtin function decl FNDECL - and its single argument ARG, return an folded expression computing - the same, or NULL_TREE if we either couldn't or didn't want to fold - (the latter happen if there's an RTL instruction available). */ +/* Determine whether a tree node represents a call to a built-in + function. If the tree T is a call to a built-in function with + the right number of arguments of the appropriate types, return + the DECL_FUNCTION_CODE of the call, e.g. BUILT_IN_SQRT. + Otherwise the return value is END_BUILTINS. */ -static tree -fold_builtin_interclass_mathfn (location_t loc, tree fndecl, tree arg) +enum built_in_function +builtin_mathfn_code (const_tree t) { - machine_mode mode; - - if (!validate_arg (arg, REAL_TYPE)) - return NULL_TREE; - - if (interclass_mathfn_icode (arg, fndecl) != CODE_FOR_nothing) - return NULL_TREE; + const_tree fndecl, arg, parmlist; + const_tree argtype, parmtype; + const_call_expr_arg_iterator iter; - mode = TYPE_MODE (TREE_TYPE (arg)); + if (TREE_CODE (t) != CALL_EXPR) + return END_BUILTINS; - bool is_ibm_extended = MODE_COMPOSITE_P (mode); + fndecl = get_callee_fndecl (t); + if (fndecl == NULL_TREE || !fndecl_built_in_p (fndecl, BUILT_IN_NORMAL)) + return END_BUILTINS; - /* If there is no optab, try generic code. */ - switch (DECL_FUNCTION_CODE (fndecl)) + parmlist = TYPE_ARG_TYPES (TREE_TYPE (fndecl)); + init_const_call_expr_arg_iterator (t, &iter); + for (; parmlist; parmlist = TREE_CHAIN (parmlist)) { - tree result; + /* If a function doesn't take a variable number of arguments, + the last element in the list will have type `void'. */ + parmtype = TREE_VALUE (parmlist); + if (VOID_TYPE_P (parmtype)) + { + if (more_const_call_expr_args_p (&iter)) + return END_BUILTINS; + return DECL_FUNCTION_CODE (fndecl); + } - CASE_FLT_FN (BUILT_IN_ISINF): - { - /* isinf(x) -> isgreater(fabs(x),DBL_MAX). */ - tree const isgr_fn = builtin_decl_explicit (BUILT_IN_ISGREATER); - tree type = TREE_TYPE (arg); - REAL_VALUE_TYPE r; - char buf[128]; + if (! more_const_call_expr_args_p (&iter)) + return END_BUILTINS; - if (is_ibm_extended) - { - /* NaN and Inf are encoded in the high-order double value - only. The low-order value is not significant. */ - type = double_type_node; - mode = DFmode; - arg = fold_build1_loc (loc, NOP_EXPR, type, arg); - } - get_max_float (REAL_MODE_FORMAT (mode), buf, sizeof (buf), false); - real_from_string (&r, buf); - result = build_call_expr (isgr_fn, 2, - fold_build1_loc (loc, ABS_EXPR, type, arg), - build_real (type, r)); - return result; - } - CASE_FLT_FN (BUILT_IN_FINITE): - case BUILT_IN_ISFINITE: - { - /* isfinite(x) -> islessequal(fabs(x),DBL_MAX). */ - tree const isle_fn = builtin_decl_explicit (BUILT_IN_ISLESSEQUAL); - tree type = TREE_TYPE (arg); - REAL_VALUE_TYPE r; - char buf[128]; + arg = next_const_call_expr_arg (&iter); + argtype = TREE_TYPE (arg); - if (is_ibm_extended) - { - /* NaN and Inf are encoded in the high-order double value - only. The low-order value is not significant. 
*/ - type = double_type_node; - mode = DFmode; - arg = fold_build1_loc (loc, NOP_EXPR, type, arg); - } - get_max_float (REAL_MODE_FORMAT (mode), buf, sizeof (buf), false); - real_from_string (&r, buf); - result = build_call_expr (isle_fn, 2, - fold_build1_loc (loc, ABS_EXPR, type, arg), - build_real (type, r)); - /*result = fold_build2_loc (loc, UNGT_EXPR, - TREE_TYPE (TREE_TYPE (fndecl)), - fold_build1_loc (loc, ABS_EXPR, type, arg), - build_real (type, r)); - result = fold_build1_loc (loc, TRUTH_NOT_EXPR, - TREE_TYPE (TREE_TYPE (fndecl)), - result);*/ - return result; - } - case BUILT_IN_ISNORMAL: - { - /* isnormal(x) -> isgreaterequal(fabs(x),DBL_MIN) & - islessequal(fabs(x),DBL_MAX). */ - tree const isle_fn = builtin_decl_explicit (BUILT_IN_ISLESSEQUAL); - tree type = TREE_TYPE (arg); - tree orig_arg, max_exp, min_exp; - machine_mode orig_mode = mode; - REAL_VALUE_TYPE rmax, rmin; - char buf[128]; + if (SCALAR_FLOAT_TYPE_P (parmtype)) + { + if (! SCALAR_FLOAT_TYPE_P (argtype)) + return END_BUILTINS; + } + else if (COMPLEX_FLOAT_TYPE_P (parmtype)) + { + if (! COMPLEX_FLOAT_TYPE_P (argtype)) + return END_BUILTINS; + } + else if (POINTER_TYPE_P (parmtype)) + { + if (! POINTER_TYPE_P (argtype)) + return END_BUILTINS; + } + else if (INTEGRAL_TYPE_P (parmtype)) + { + if (! INTEGRAL_TYPE_P (argtype)) + return END_BUILTINS; + } + else + return END_BUILTINS; + } - orig_arg = arg = builtin_save_expr (arg); - if (is_ibm_extended) - { - /* Use double to test the normal range of IBM extended - precision. Emin for IBM extended precision is - different to emin for IEEE double, being 53 higher - since the low double exponent is at least 53 lower - than the high double exponent. */ - type = double_type_node; - mode = DFmode; - arg = fold_build1_loc (loc, NOP_EXPR, type, arg); - } - arg = fold_build1_loc (loc, ABS_EXPR, type, arg); + /* Variable-length argument list. */ + return DECL_FUNCTION_CODE (fndecl); +} - get_max_float (REAL_MODE_FORMAT (mode), buf, sizeof (buf), false); - real_from_string (&rmax, buf); - sprintf (buf, "0x1p%d", REAL_MODE_FORMAT (orig_mode)->emin - 1); - real_from_string (&rmin, buf); - max_exp = build_real (type, rmax); - min_exp = build_real (type, rmin); +/* Fold a call to __builtin_constant_p, if we know its argument ARG will + evaluate to a constant. */ - max_exp = build_call_expr (isle_fn, 2, arg, max_exp); - if (is_ibm_extended) - { - /* Testing the high end of the range is done just using - the high double, using the same test as isfinite(). - For the subnormal end of the range we first test the - high double, then if its magnitude is equal to the - limit of 0x1p-969, we test whether the low double is - non-zero and opposite sign to the high double. 
*/ - tree const islt_fn = builtin_decl_explicit (BUILT_IN_ISLESS); - tree const isgt_fn = builtin_decl_explicit (BUILT_IN_ISGREATER); - tree gt_min = build_call_expr (isgt_fn, 2, arg, min_exp); - tree eq_min = fold_build2 (EQ_EXPR, integer_type_node, - arg, min_exp); - tree as_complex = build1 (VIEW_CONVERT_EXPR, - complex_double_type_node, orig_arg); - tree hi_dbl = build1 (REALPART_EXPR, type, as_complex); - tree lo_dbl = build1 (IMAGPART_EXPR, type, as_complex); - tree zero = build_real (type, dconst0); - tree hilt = build_call_expr (islt_fn, 2, hi_dbl, zero); - tree lolt = build_call_expr (islt_fn, 2, lo_dbl, zero); - tree logt = build_call_expr (isgt_fn, 2, lo_dbl, zero); - tree ok_lo = fold_build1 (TRUTH_NOT_EXPR, integer_type_node, - fold_build3 (COND_EXPR, - integer_type_node, - hilt, logt, lolt)); - eq_min = fold_build2 (TRUTH_ANDIF_EXPR, integer_type_node, - eq_min, ok_lo); - min_exp = fold_build2 (TRUTH_ORIF_EXPR, integer_type_node, - gt_min, eq_min); - } - else - { - tree const isge_fn - = builtin_decl_explicit (BUILT_IN_ISGREATEREQUAL); - min_exp = build_call_expr (isge_fn, 2, arg, min_exp); - } - result = fold_build2 (BIT_AND_EXPR, integer_type_node, - max_exp, min_exp); - return result; - } - default: - break; +static tree +fold_builtin_constant_p (tree arg) +{ + /* We return 1 for a numeric type that's known to be a constant + value at compile-time or for an aggregate type that's a + literal constant. */ + STRIP_NOPS (arg); + + /* If we know this is a constant, emit the constant of one. */ + if (CONSTANT_CLASS_P (arg) + || (TREE_CODE (arg) == CONSTRUCTOR + && TREE_CONSTANT (arg))) + return integer_one_node; + if (TREE_CODE (arg) == ADDR_EXPR) + { + tree op = TREE_OPERAND (arg, 0); + if (TREE_CODE (op) == STRING_CST + || (TREE_CODE (op) == ARRAY_REF + && integer_zerop (TREE_OPERAND (op, 1)) + && TREE_CODE (TREE_OPERAND (op, 0)) == STRING_CST)) + return integer_one_node; } + /* If this expression has side effects, show we don't know it to be a + constant. Likewise if it's a pointer or aggregate type since in + those case we only want literals, since those are only optimized + when generating RTL, not later. + And finally, if we are compiling an initializer, not code, we + need to return a definite result now; there's not going to be any + more optimization done. */ + if (TREE_SIDE_EFFECTS (arg) + || AGGREGATE_TYPE_P (TREE_TYPE (arg)) + || POINTER_TYPE_P (TREE_TYPE (arg)) + || cfun == 0 + || folding_initializer + || force_folding_builtin_constant_p) + return integer_zero_node; + return NULL_TREE; } -/* Fold a call to __builtin_isnan(), __builtin_isinf, __builtin_finite. - ARG is the argument for the call. */ +/* Create builtin_expect or builtin_expect_with_probability + with PRED and EXPECTED as its arguments and return it as a truthvalue. + Fortran FE can also produce builtin_expect with PREDICTOR as third argument. + builtin_expect_with_probability instead uses third argument as PROBABILITY + value. */ static tree -fold_builtin_classify (location_t loc, tree fndecl, tree arg, int builtin_index) +build_builtin_expect_predicate (location_t loc, tree pred, tree expected, + tree predictor, tree probability) { - tree type = TREE_TYPE (TREE_TYPE (fndecl)); + tree fn, arg_types, pred_type, expected_type, call_expr, ret_type; - if (!validate_arg (arg, REAL_TYPE)) - return NULL_TREE; + fn = builtin_decl_explicit (probability == NULL_TREE ? 
BUILT_IN_EXPECT + : BUILT_IN_EXPECT_WITH_PROBABILITY); + arg_types = TYPE_ARG_TYPES (TREE_TYPE (fn)); + ret_type = TREE_TYPE (TREE_TYPE (fn)); + pred_type = TREE_VALUE (arg_types); + expected_type = TREE_VALUE (TREE_CHAIN (arg_types)); - switch (builtin_index) - { - case BUILT_IN_ISINF: - if (tree_expr_infinite_p (arg)) - return omit_one_operand_loc (loc, type, integer_one_node, arg); - if (!tree_expr_maybe_infinite_p (arg)) - return omit_one_operand_loc (loc, type, integer_zero_node, arg); - return NULL_TREE; + pred = fold_convert_loc (loc, pred_type, pred); + expected = fold_convert_loc (loc, expected_type, expected); - case BUILT_IN_ISINF_SIGN: - { - /* isinf_sign(x) -> isinf(x) ? (signbit(x) ? -1 : 1) : 0 */ - /* In a boolean context, GCC will fold the inner COND_EXPR to - 1. So e.g. "if (isinf_sign(x))" would be folded to just - "if (isinf(x) ? 1 : 0)" which becomes "if (isinf(x))". */ - tree signbit_fn = builtin_decl_explicit (BUILT_IN_SIGNBIT); - tree isinf_fn = builtin_decl_explicit (BUILT_IN_ISINF); - tree tmp = NULL_TREE; + if (probability) + call_expr = build_call_expr_loc (loc, fn, 3, pred, expected, probability); + else + call_expr = build_call_expr_loc (loc, fn, predictor ? 3 : 2, pred, expected, + predictor); - arg = builtin_save_expr (arg); + return build2 (NE_EXPR, TREE_TYPE (pred), call_expr, + build_int_cst (ret_type, 0)); +} - if (signbit_fn && isinf_fn) - { - tree signbit_call = build_call_expr_loc (loc, signbit_fn, 1, arg); - tree isinf_call = build_call_expr_loc (loc, isinf_fn, 1, arg); +/* Fold a call to builtin_expect with arguments ARG0, ARG1, ARG2, ARG3. Return + NULL_TREE if no simplification is possible. */ - signbit_call = fold_build2_loc (loc, NE_EXPR, integer_type_node, - signbit_call, integer_zero_node); - isinf_call = fold_build2_loc (loc, NE_EXPR, integer_type_node, - isinf_call, integer_zero_node); +tree +fold_builtin_expect (location_t loc, tree arg0, tree arg1, tree arg2, + tree arg3) +{ + tree inner, fndecl, inner_arg0; + enum tree_code code; - tmp = fold_build3_loc (loc, COND_EXPR, integer_type_node, signbit_call, - integer_minus_one_node, integer_one_node); - tmp = fold_build3_loc (loc, COND_EXPR, integer_type_node, - isinf_call, tmp, - integer_zero_node); - } + /* Distribute the expected value over short-circuiting operators. + See through the cast from truthvalue_type_node to long. */ + inner_arg0 = arg0; + while (CONVERT_EXPR_P (inner_arg0) + && INTEGRAL_TYPE_P (TREE_TYPE (inner_arg0)) + && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (inner_arg0, 0)))) + inner_arg0 = TREE_OPERAND (inner_arg0, 0); - return tmp; - } + /* If this is a builtin_expect within a builtin_expect keep the + inner one. See through a comparison against a constant. It + might have been added to create a thruthvalue. 
*/ + inner = inner_arg0; - case BUILT_IN_ISFINITE: - if (tree_expr_finite_p (arg)) - return omit_one_operand_loc (loc, type, integer_one_node, arg); - if (tree_expr_nan_p (arg) || tree_expr_infinite_p (arg)) - return omit_one_operand_loc (loc, type, integer_zero_node, arg); - return NULL_TREE; + if (COMPARISON_CLASS_P (inner) + && TREE_CODE (TREE_OPERAND (inner, 1)) == INTEGER_CST) + inner = TREE_OPERAND (inner, 0); - case BUILT_IN_ISNAN: - if (tree_expr_nan_p (arg)) - return omit_one_operand_loc (loc, type, integer_one_node, arg); - if (!tree_expr_maybe_nan_p (arg)) - return omit_one_operand_loc (loc, type, integer_zero_node, arg); + if (TREE_CODE (inner) == CALL_EXPR + && (fndecl = get_callee_fndecl (inner)) + && (fndecl_built_in_p (fndecl, BUILT_IN_EXPECT) + || fndecl_built_in_p (fndecl, BUILT_IN_EXPECT_WITH_PROBABILITY))) + return arg0; - { - bool is_ibm_extended = MODE_COMPOSITE_P (TYPE_MODE (TREE_TYPE (arg))); - if (is_ibm_extended) - { - /* NaN and Inf are encoded in the high-order double value - only. The low-order value is not significant. */ - arg = fold_build1_loc (loc, NOP_EXPR, double_type_node, arg); - } - } - arg = builtin_save_expr (arg); - return fold_build2_loc (loc, UNORDERED_EXPR, type, arg, arg); + inner = inner_arg0; + code = TREE_CODE (inner); + if (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR) + { + tree op0 = TREE_OPERAND (inner, 0); + tree op1 = TREE_OPERAND (inner, 1); + arg1 = save_expr (arg1); - default: - gcc_unreachable (); + op0 = build_builtin_expect_predicate (loc, op0, arg1, arg2, arg3); + op1 = build_builtin_expect_predicate (loc, op1, arg1, arg2, arg3); + inner = build2 (code, TREE_TYPE (inner), op0, op1); + + return fold_convert_loc (loc, TREE_TYPE (arg0), inner); + } + + /* If the argument isn't invariant then there's nothing else we can do. */ + if (!TREE_CONSTANT (inner_arg0)) + return NULL_TREE; + + /* If we expect that a comparison against the argument will fold to + a constant return the constant. In practice, this means a true + constant or the address of a non-weak symbol. */ + inner = inner_arg0; + STRIP_NOPS (inner); + if (TREE_CODE (inner) == ADDR_EXPR) + { + do + { + inner = TREE_OPERAND (inner, 0); + } + while (TREE_CODE (inner) == COMPONENT_REF + || TREE_CODE (inner) == ARRAY_REF); + if (VAR_OR_FUNCTION_DECL_P (inner) && DECL_WEAK (inner)) + return NULL_TREE; } + + /* Otherwise, ARG0 already has the proper type for the return value. */ + return arg0; } -/* Fold a call to __builtin_fpclassify(int, int, int, int, int, ...). - This builtin will generate code to return the appropriate floating - point classification depending on the value of the floating point - number passed in. The possible return values must be supplied as - int arguments to the call in the following order: FP_NAN, FP_INFINITE, - FP_NORMAL, FP_SUBNORMAL and FP_ZERO. The ellipses is for exactly - one floating point argument which is "type generic". */ +/* Fold a call to __builtin_classify_type with argument ARG. */ static tree -fold_builtin_fpclassify (location_t loc, tree *args, int nargs) +fold_builtin_classify_type (tree arg) { - tree fp_nan, fp_infinite, fp_normal, fp_subnormal, fp_zero, - arg, type, res, tmp; - machine_mode mode; - REAL_VALUE_TYPE r; - char buf[128]; + if (arg == 0) + return build_int_cst (integer_type_node, no_type_class); - /* Verify the required arguments in the original call. 
*/ - if (nargs != 6 - || !validate_arg (args[0], INTEGER_TYPE) - || !validate_arg (args[1], INTEGER_TYPE) - || !validate_arg (args[2], INTEGER_TYPE) - || !validate_arg (args[3], INTEGER_TYPE) - || !validate_arg (args[4], INTEGER_TYPE) - || !validate_arg (args[5], REAL_TYPE)) + return build_int_cst (integer_type_node, type_to_class (TREE_TYPE (arg))); +} + +/* Fold a call EXPR (which may be null) to __builtin_strlen with argument + ARG. */ + +static tree +fold_builtin_strlen (location_t loc, tree expr, tree type, tree arg) +{ + if (!validate_arg (arg, POINTER_TYPE)) return NULL_TREE; + else + { + c_strlen_data lendata = { }; + tree len = c_strlen (arg, 0, &lendata); - fp_nan = args[0]; - fp_infinite = args[1]; - fp_normal = args[2]; - fp_subnormal = args[3]; - fp_zero = args[4]; - arg = args[5]; - type = TREE_TYPE (arg); - mode = TYPE_MODE (type); - arg = builtin_save_expr (fold_build1_loc (loc, ABS_EXPR, type, arg)); + if (len) + return fold_convert_loc (loc, type, len); + + if (!lendata.decl) + c_strlen (arg, 1, &lendata); + + if (lendata.decl) + { + if (EXPR_HAS_LOCATION (arg)) + loc = EXPR_LOCATION (arg); + else if (loc == UNKNOWN_LOCATION) + loc = input_location; + warn_string_no_nul (loc, expr, "strlen", arg, lendata.decl); + } + + return NULL_TREE; + } +} + +/* Fold a call to __builtin_inf or __builtin_huge_val. */ + +static tree +fold_builtin_inf (location_t loc, tree type, int warn) +{ + REAL_VALUE_TYPE real; + + /* __builtin_inff is intended to be usable to define INFINITY on all + targets. If an infinity is not available, INFINITY expands "to a + positive constant of type float that overflows at translation + time", footnote "In this case, using INFINITY will violate the + constraint in 6.4.4 and thus require a diagnostic." (C99 7.12#4). + Thus we pedwarn to ensure this constraint violation is + diagnosed. */ + if (!MODE_HAS_INFINITIES (TYPE_MODE (type)) && warn) + pedwarn (loc, 0, "target format does not support infinity"); - /* fpclassify(x) -> - isnan(x) ? FP_NAN : - (fabs(x) == Inf ? FP_INFINITE : - (fabs(x) >= DBL_MIN ? FP_NORMAL : - (x == 0 ? FP_ZERO : FP_SUBNORMAL))). */ + real_inf (&real); + return build_real (type, real); +} - tmp = fold_build2_loc (loc, EQ_EXPR, integer_type_node, arg, - build_real (type, dconst0)); - res = fold_build3_loc (loc, COND_EXPR, integer_type_node, - tmp, fp_zero, fp_subnormal); +/* Fold function call to builtin sincos, sincosf, or sincosl. Return + NULL_TREE if no simplification can be made. */ - sprintf (buf, "0x1p%d", REAL_MODE_FORMAT (mode)->emin - 1); - real_from_string (&r, buf); - tmp = fold_build2_loc (loc, GE_EXPR, integer_type_node, - arg, build_real (type, r)); - res = fold_build3_loc (loc, COND_EXPR, integer_type_node, tmp, fp_normal, res); +static tree +fold_builtin_sincos (location_t loc, + tree arg0, tree arg1, tree arg2) +{ + tree type; + tree fndecl, call = NULL_TREE; - if (tree_expr_maybe_infinite_p (arg)) + if (!validate_arg (arg0, REAL_TYPE) + || !validate_arg (arg1, POINTER_TYPE) + || !validate_arg (arg2, POINTER_TYPE)) + return NULL_TREE; + + type = TREE_TYPE (arg0); + + /* Calculate the result when the argument is a constant. */ + built_in_function fn = mathfn_built_in_2 (type, CFN_BUILT_IN_CEXPI); + if (fn == END_BUILTINS) + return NULL_TREE; + + /* Canonicalize sincos to cexpi. 
*/ + if (TREE_CODE (arg0) == REAL_CST) { - real_inf (&r); - tmp = fold_build2_loc (loc, EQ_EXPR, integer_type_node, arg, - build_real (type, r)); - res = fold_build3_loc (loc, COND_EXPR, integer_type_node, tmp, - fp_infinite, res); + tree complex_type = build_complex_type (type); + call = fold_const_call (as_combined_fn (fn), complex_type, arg0); } - - if (tree_expr_maybe_nan_p (arg)) + if (!call) { - tmp = fold_build2_loc (loc, ORDERED_EXPR, integer_type_node, arg, arg); - res = fold_build3_loc (loc, COND_EXPR, integer_type_node, tmp, res, fp_nan); + if (!targetm.libc_has_function (function_c99_math_complex, type) + || !builtin_decl_implicit_p (fn)) + return NULL_TREE; + fndecl = builtin_decl_explicit (fn); + call = build_call_expr_loc (loc, fndecl, 1, arg0); + call = builtin_save_expr (call); } - return res; + tree ptype = build_pointer_type (type); + arg1 = fold_convert (ptype, arg1); + arg2 = fold_convert (ptype, arg2); + return build2 (COMPOUND_EXPR, void_type_node, + build2 (MODIFY_EXPR, void_type_node, + build_fold_indirect_ref_loc (loc, arg1), + fold_build1_loc (loc, IMAGPART_EXPR, type, call)), + build2 (MODIFY_EXPR, void_type_node, + build_fold_indirect_ref_loc (loc, arg2), + fold_build1_loc (loc, REALPART_EXPR, type, call))); } -/* Fold a call to an unordered comparison function such as - __builtin_isgreater(). FNDECL is the FUNCTION_DECL for the function - being called and ARG0 and ARG1 are the arguments for the call. - UNORDERED_CODE and ORDERED_CODE are comparison codes that give - the opposite of the desired result. UNORDERED_CODE is used - for modes that can hold NaNs and ORDERED_CODE is used for - the rest. */ +/* Fold function call to builtin memcmp with arguments ARG1 and ARG2. + Return NULL_TREE if no simplification can be made. */ static tree -fold_builtin_unordered_cmp (location_t loc, tree fndecl, tree arg0, tree arg1, - enum tree_code unordered_code, - enum tree_code ordered_code) +fold_builtin_memcmp (location_t loc, tree arg1, tree arg2, tree len) { - tree type = TREE_TYPE (TREE_TYPE (fndecl)); - enum tree_code code; - tree type0, type1; - enum tree_code code0, code1; - tree cmp_type = NULL_TREE; - - type0 = TREE_TYPE (arg0); - type1 = TREE_TYPE (arg1); - - code0 = TREE_CODE (type0); - code1 = TREE_CODE (type1); + if (!validate_arg (arg1, POINTER_TYPE) + || !validate_arg (arg2, POINTER_TYPE) + || !validate_arg (len, INTEGER_TYPE)) + return NULL_TREE; - if (code0 == REAL_TYPE && code1 == REAL_TYPE) - /* Choose the wider of two real types. */ - cmp_type = TYPE_PRECISION (type0) >= TYPE_PRECISION (type1) - ? type0 : type1; - else if (code0 == REAL_TYPE && code1 == INTEGER_TYPE) - cmp_type = type0; - else if (code0 == INTEGER_TYPE && code1 == REAL_TYPE) - cmp_type = type1; + /* If the LEN parameter is zero, return zero. */ + if (integer_zerop (len)) + return omit_two_operands_loc (loc, integer_type_node, integer_zero_node, + arg1, arg2); - arg0 = fold_convert_loc (loc, cmp_type, arg0); - arg1 = fold_convert_loc (loc, cmp_type, arg1); + /* If ARG1 and ARG2 are the same (and not volatile), return zero. */ + if (operand_equal_p (arg1, arg2, 0)) + return omit_one_operand_loc (loc, integer_type_node, integer_zero_node, len); - if (unordered_code == UNORDERED_EXPR) + /* If len parameter is one, return an expression corresponding to + (*(const unsigned char*)arg1 - (const unsigned char*)arg2). 
*/ + if (tree_fits_uhwi_p (len) && tree_to_uhwi (len) == 1) { - if (tree_expr_nan_p (arg0) || tree_expr_nan_p (arg1)) - return omit_two_operands_loc (loc, type, integer_one_node, arg0, arg1); - if (!tree_expr_maybe_nan_p (arg0) && !tree_expr_maybe_nan_p (arg1)) - return omit_two_operands_loc (loc, type, integer_zero_node, arg0, arg1); - return fold_build2_loc (loc, UNORDERED_EXPR, type, arg0, arg1); + tree cst_uchar_node = build_type_variant (unsigned_char_type_node, 1, 0); + tree cst_uchar_ptr_node + = build_pointer_type_for_mode (cst_uchar_node, ptr_mode, true); + + tree ind1 + = fold_convert_loc (loc, integer_type_node, + build1 (INDIRECT_REF, cst_uchar_node, + fold_convert_loc (loc, + cst_uchar_ptr_node, + arg1))); + tree ind2 + = fold_convert_loc (loc, integer_type_node, + build1 (INDIRECT_REF, cst_uchar_node, + fold_convert_loc (loc, + cst_uchar_ptr_node, + arg2))); + return fold_build2_loc (loc, MINUS_EXPR, integer_type_node, ind1, ind2); } - code = (tree_expr_maybe_nan_p (arg0) || tree_expr_maybe_nan_p (arg1)) - ? unordered_code : ordered_code; - return fold_build1_loc (loc, TRUTH_NOT_EXPR, type, - fold_build2_loc (loc, code, type, arg0, arg1)); + return NULL_TREE; } -/* Fold __builtin_{,s,u}{add,sub,mul}{,l,ll}_overflow, either into normal - arithmetics if it can never overflow, or into internal functions that - return both result of arithmetics and overflowed boolean flag in - a complex integer result, or some other check for overflow. - Similarly fold __builtin_{add,sub,mul}_overflow_p to just the overflow - checking part of that. */ +/* Fold a call to builtin isascii with argument ARG. */ static tree -fold_builtin_arith_overflow (location_t loc, enum built_in_function fcode, - tree arg0, tree arg1, tree arg2) +fold_builtin_isascii (location_t loc, tree arg) { - enum internal_fn ifn = IFN_LAST; - /* The code of the expression corresponding to the built-in. */ - enum tree_code opcode = ERROR_MARK; - bool ovf_only = false; - - switch (fcode) + if (!validate_arg (arg, INTEGER_TYPE)) + return NULL_TREE; + else { - case BUILT_IN_ADD_OVERFLOW_P: - ovf_only = true; - /* FALLTHRU */ - case BUILT_IN_ADD_OVERFLOW: - case BUILT_IN_SADD_OVERFLOW: - case BUILT_IN_SADDL_OVERFLOW: - case BUILT_IN_SADDLL_OVERFLOW: - case BUILT_IN_UADD_OVERFLOW: - case BUILT_IN_UADDL_OVERFLOW: - case BUILT_IN_UADDLL_OVERFLOW: - opcode = PLUS_EXPR; - ifn = IFN_ADD_OVERFLOW; - break; - case BUILT_IN_SUB_OVERFLOW_P: - ovf_only = true; - /* FALLTHRU */ - case BUILT_IN_SUB_OVERFLOW: - case BUILT_IN_SSUB_OVERFLOW: - case BUILT_IN_SSUBL_OVERFLOW: - case BUILT_IN_SSUBLL_OVERFLOW: - case BUILT_IN_USUB_OVERFLOW: - case BUILT_IN_USUBL_OVERFLOW: - case BUILT_IN_USUBLL_OVERFLOW: - opcode = MINUS_EXPR; - ifn = IFN_SUB_OVERFLOW; - break; - case BUILT_IN_MUL_OVERFLOW_P: - ovf_only = true; - /* FALLTHRU */ - case BUILT_IN_MUL_OVERFLOW: - case BUILT_IN_SMUL_OVERFLOW: - case BUILT_IN_SMULL_OVERFLOW: - case BUILT_IN_SMULLL_OVERFLOW: - case BUILT_IN_UMUL_OVERFLOW: - case BUILT_IN_UMULL_OVERFLOW: - case BUILT_IN_UMULLL_OVERFLOW: - opcode = MULT_EXPR; - ifn = IFN_MUL_OVERFLOW; - break; - default: - gcc_unreachable (); + /* Transform isascii(c) -> ((c & ~0x7f) == 0). 
*/ + arg = fold_build2 (BIT_AND_EXPR, integer_type_node, arg, + build_int_cst (integer_type_node, + ~ (unsigned HOST_WIDE_INT) 0x7f)); + return fold_build2_loc (loc, EQ_EXPR, integer_type_node, + arg, integer_zero_node); } +} - /* For the "generic" overloads, the first two arguments can have different - types and the last argument determines the target type to use to check - for overflow. The arguments of the other overloads all have the same - type. */ - tree type = ovf_only ? TREE_TYPE (arg2) : TREE_TYPE (TREE_TYPE (arg2)); +/* Fold a call to builtin toascii with argument ARG. */ - /* For the __builtin_{add,sub,mul}_overflow_p builtins, when the first two - arguments are constant, attempt to fold the built-in call into a constant - expression indicating whether or not it detected an overflow. */ - if (ovf_only - && TREE_CODE (arg0) == INTEGER_CST - && TREE_CODE (arg1) == INTEGER_CST) - /* Perform the computation in the target type and check for overflow. */ - return omit_one_operand_loc (loc, boolean_type_node, - arith_overflowed_p (opcode, type, arg0, arg1) - ? boolean_true_node : boolean_false_node, - arg2); +static tree +fold_builtin_toascii (location_t loc, tree arg) +{ + if (!validate_arg (arg, INTEGER_TYPE)) + return NULL_TREE; - tree intres, ovfres; - if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST) - { - intres = fold_binary_loc (loc, opcode, type, - fold_convert_loc (loc, type, arg0), - fold_convert_loc (loc, type, arg1)); - if (TREE_OVERFLOW (intres)) - intres = drop_tree_overflow (intres); - ovfres = (arith_overflowed_p (opcode, type, arg0, arg1) - ? boolean_true_node : boolean_false_node); - } + /* Transform toascii(c) -> (c & 0x7f). */ + return fold_build2_loc (loc, BIT_AND_EXPR, integer_type_node, arg, + build_int_cst (integer_type_node, 0x7f)); +} + +/* Fold a call to builtin isdigit with argument ARG. */ + +static tree +fold_builtin_isdigit (location_t loc, tree arg) +{ + if (!validate_arg (arg, INTEGER_TYPE)) + return NULL_TREE; else { - tree ctype = build_complex_type (type); - tree call = build_call_expr_internal_loc (loc, ifn, ctype, 2, - arg0, arg1); - tree tgt = save_expr (call); - intres = build1_loc (loc, REALPART_EXPR, type, tgt); - ovfres = build1_loc (loc, IMAGPART_EXPR, type, tgt); - ovfres = fold_convert_loc (loc, boolean_type_node, ovfres); - } + /* Transform isdigit(c) -> (unsigned)(c) - '0' <= 9. */ + /* According to the C standard, isdigit is unaffected by locale. + However, it definitely is affected by the target character set. */ + unsigned HOST_WIDE_INT target_digit0 + = lang_hooks.to_target_charset ('0'); - if (ovf_only) - return omit_one_operand_loc (loc, boolean_type_node, ovfres, arg2); + if (target_digit0 == 0) + return NULL_TREE; - tree mem_arg2 = build_fold_indirect_ref_loc (loc, arg2); - tree store - = fold_build2_loc (loc, MODIFY_EXPR, void_type_node, mem_arg2, intres); - return build2_loc (loc, COMPOUND_EXPR, boolean_type_node, store, ovfres); + arg = fold_convert_loc (loc, unsigned_type_node, arg); + arg = fold_build2 (MINUS_EXPR, unsigned_type_node, arg, + build_int_cst (unsigned_type_node, target_digit0)); + return fold_build2_loc (loc, LE_EXPR, integer_type_node, arg, + build_int_cst (unsigned_type_node, 9)); + } } -/* Fold a call to __builtin_FILE to a constant string. */ +/* Fold a call to fabs, fabsf or fabsl with argument ARG. 
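
The isdigit transform collapses the two-sided range check into one unsigned comparison: any value below '0' wraps around to a huge unsigned number and fails the <= 9 test. Illustrated against the library, assuming an ASCII target character set (to_target_charset ('0') == 0x30):

    #include <assert.h>
    #include <ctype.h>

    int
    main (void)
    {
      for (int c = 0; c < 256; c++)
        assert ((isdigit (c) != 0) == ((unsigned) c - '0' <= 9));
      return 0;
    }
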
*/ -static inline tree -fold_builtin_FILE (location_t loc) +static tree +fold_builtin_fabs (location_t loc, tree arg, tree type) { - if (const char *fname = LOCATION_FILE (loc)) - { - /* The documentation says this builtin is equivalent to the preprocessor - __FILE__ macro so it appears appropriate to use the same file prefix - mappings. */ - fname = remap_macro_filename (fname); - return build_string_literal (strlen (fname) + 1, fname); - } + if (!validate_arg (arg, REAL_TYPE)) + return NULL_TREE; - return build_string_literal (1, ""); + arg = fold_convert_loc (loc, type, arg); + return fold_build1_loc (loc, ABS_EXPR, type, arg); } -/* Fold a call to __builtin_FUNCTION to a constant string. */ +/* Fold a call to abs, labs, llabs or imaxabs with argument ARG. */ -static inline tree -fold_builtin_FUNCTION () +static tree +fold_builtin_abs (location_t loc, tree arg, tree type) { - const char *name = ""; - - if (current_function_decl) - name = lang_hooks.decl_printable_name (current_function_decl, 0); + if (!validate_arg (arg, INTEGER_TYPE)) + return NULL_TREE; - return build_string_literal (strlen (name) + 1, name); + arg = fold_convert_loc (loc, type, arg); + return fold_build1_loc (loc, ABS_EXPR, type, arg); } -/* Fold a call to __builtin_LINE to an integer constant. */ +/* Fold a call to builtin carg(a+bi) -> atan2(b,a). */ -static inline tree -fold_builtin_LINE (location_t loc, tree type) +static tree +fold_builtin_carg (location_t loc, tree arg, tree type) { - return build_int_cst (type, LOCATION_LINE (loc)); + if (validate_arg (arg, COMPLEX_TYPE) + && TREE_CODE (TREE_TYPE (TREE_TYPE (arg))) == REAL_TYPE) + { + tree atan2_fn = mathfn_built_in (type, BUILT_IN_ATAN2); + + if (atan2_fn) + { + tree new_arg = builtin_save_expr (arg); + tree r_arg = fold_build1_loc (loc, REALPART_EXPR, type, new_arg); + tree i_arg = fold_build1_loc (loc, IMAGPART_EXPR, type, new_arg); + return build_call_expr_loc (loc, atan2_fn, 2, i_arg, r_arg); + } + } + + return NULL_TREE; } -/* Fold a call to built-in function FNDECL with 0 arguments. - This function returns NULL_TREE if no simplification was possible. */ +/* Fold a call to builtin frexp, we can assume the base is 2. */ static tree -fold_builtin_0 (location_t loc, tree fndecl) +fold_builtin_frexp (location_t loc, tree arg0, tree arg1, tree rettype) { - tree type = TREE_TYPE (TREE_TYPE (fndecl)); - enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl); - switch (fcode) - { - case BUILT_IN_FILE: - return fold_builtin_FILE (loc); + if (! validate_arg (arg0, REAL_TYPE) || ! validate_arg (arg1, POINTER_TYPE)) + return NULL_TREE; - case BUILT_IN_FUNCTION: - return fold_builtin_FUNCTION (); + STRIP_NOPS (arg0); - case BUILT_IN_LINE: - return fold_builtin_LINE (loc, type); + if (!(TREE_CODE (arg0) == REAL_CST && ! TREE_OVERFLOW (arg0))) + return NULL_TREE; - CASE_FLT_FN (BUILT_IN_INF): - CASE_FLT_FN_FLOATN_NX (BUILT_IN_INF): - case BUILT_IN_INFD32: - case BUILT_IN_INFD64: - case BUILT_IN_INFD128: - return fold_builtin_inf (loc, type, true); + arg1 = build_fold_indirect_ref_loc (loc, arg1); - CASE_FLT_FN (BUILT_IN_HUGE_VAL): - CASE_FLT_FN_FLOATN_NX (BUILT_IN_HUGE_VAL): - return fold_builtin_inf (loc, type, false); + /* Proceed if a valid pointer type was passed in. 
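
fold_builtin_carg rewrites carg (z) as atan2 (cimag (z), creal (z)); both compute the principal-value phase angle. A small check, with a tolerance since exact agreement between carg and atan2 is a libm implementation detail:

    #include <assert.h>
    #include <complex.h>
    #include <math.h>

    int
    main (void)
    {
      double complex z = -1.0 + 1.0 * I;
      assert (fabs (carg (z) - atan2 (cimag (z), creal (z))) < 1e-15);
      return 0;
    }
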
*/ + if (TYPE_MAIN_VARIANT (TREE_TYPE (arg1)) == integer_type_node) + { + const REAL_VALUE_TYPE *const value = TREE_REAL_CST_PTR (arg0); + tree frac, exp; - case BUILT_IN_CLASSIFY_TYPE: - return fold_builtin_classify_type (NULL_TREE); + switch (value->cl) + { + case rvc_zero: + /* For +-0, return (*exp = 0, +-0). */ + exp = integer_zero_node; + frac = arg0; + break; + case rvc_nan: + case rvc_inf: + /* For +-NaN or +-Inf, *exp is unspecified, return arg0. */ + return omit_one_operand_loc (loc, rettype, arg0, arg1); + case rvc_normal: + { + /* Since the frexp function always expects base 2, and in + GCC normalized significands are already in the range + [0.5, 1.0), we have exactly what frexp wants. */ + REAL_VALUE_TYPE frac_rvt = *value; + SET_REAL_EXP (&frac_rvt, 0); + frac = build_real (rettype, frac_rvt); + exp = build_int_cst (integer_type_node, REAL_EXP (value)); + } + break; + default: + gcc_unreachable (); + } - default: - break; + /* Create the COMPOUND_EXPR (*arg1 = trunc, frac). */ + arg1 = fold_build2_loc (loc, MODIFY_EXPR, rettype, arg1, exp); + TREE_SIDE_EFFECTS (arg1) = 1; + return fold_build2_loc (loc, COMPOUND_EXPR, rettype, arg1, frac); } + return NULL_TREE; } -/* Fold a call to built-in function FNDECL with 1 argument, ARG0. - This function returns NULL_TREE if no simplification was possible. */ +/* Fold a call to builtin modf. */ static tree -fold_builtin_1 (location_t loc, tree expr, tree fndecl, tree arg0) +fold_builtin_modf (location_t loc, tree arg0, tree arg1, tree rettype) { - tree type = TREE_TYPE (TREE_TYPE (fndecl)); - enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl); + if (! validate_arg (arg0, REAL_TYPE) || ! validate_arg (arg1, POINTER_TYPE)) + return NULL_TREE; - if (TREE_CODE (arg0) == ERROR_MARK) + STRIP_NOPS (arg0); + + if (!(TREE_CODE (arg0) == REAL_CST && ! TREE_OVERFLOW (arg0))) return NULL_TREE; - if (tree ret = fold_const_call (as_combined_fn (fcode), type, arg0)) - return ret; + arg1 = build_fold_indirect_ref_loc (loc, arg1); - switch (fcode) + /* Proceed if a valid pointer type was passed in. */ + if (TYPE_MAIN_VARIANT (TREE_TYPE (arg1)) == TYPE_MAIN_VARIANT (rettype)) { - case BUILT_IN_CONSTANT_P: - { - tree val = fold_builtin_constant_p (arg0); - - /* Gimplification will pull the CALL_EXPR for the builtin out of - an if condition. When not optimizing, we'll not CSE it back. - To avoid link error types of regressions, return false now. */ - if (!val && !optimize) - val = integer_zero_node; + const REAL_VALUE_TYPE *const value = TREE_REAL_CST_PTR (arg0); + REAL_VALUE_TYPE trunc, frac; - return val; + switch (value->cl) + { + case rvc_nan: + case rvc_zero: + /* For +-NaN or +-0, return (*arg1 = arg0, arg0). */ + trunc = frac = *value; + break; + case rvc_inf: + /* For +-Inf, return (*arg1 = arg0, +-0). */ + frac = dconst0; + frac.sign = value->sign; + trunc = *value; + break; + case rvc_normal: + /* Return (*arg1 = trunc(arg0), arg0-trunc(arg0)). */ + real_trunc (&trunc, VOIDmode, value); + real_arithmetic (&frac, MINUS_EXPR, value, &trunc); + /* If the original number was negative and already + integral, then the fractional part is -0.0. */ + if (value->sign && frac.cl == rvc_zero) + frac.sign = value->sign; + break; } - case BUILT_IN_CLASSIFY_TYPE: - return fold_builtin_classify_type (arg0); - - case BUILT_IN_STRLEN: - return fold_builtin_strlen (loc, expr, type, arg0); + /* Create the COMPOUND_EXPR (*arg1 = trunc, frac). 
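
These constant folds lean on the frexp/modf contracts: frexp yields a fraction in [0.5, 1) times a power of two, and modf splits a value into an integral part and a same-signed fraction. For example:

    #include <assert.h>
    #include <math.h>

    int
    main (void)
    {
      int e;
      double ip;
      assert (frexp (40.0, &e) == 0.625 && e == 6);  /* 40 = 0.625 * 2^6 */
      assert (modf (-2.5, &ip) == -0.5 && ip == -2.0);
      return 0;
    }
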
*/ + arg1 = fold_build2_loc (loc, MODIFY_EXPR, rettype, arg1, + build_real (rettype, trunc)); + TREE_SIDE_EFFECTS (arg1) = 1; + return fold_build2_loc (loc, COMPOUND_EXPR, rettype, arg1, + build_real (rettype, frac)); + } - CASE_FLT_FN (BUILT_IN_FABS): - CASE_FLT_FN_FLOATN_NX (BUILT_IN_FABS): - case BUILT_IN_FABSD32: - case BUILT_IN_FABSD64: - case BUILT_IN_FABSD128: - return fold_builtin_fabs (loc, arg0, type); + return NULL_TREE; +} - case BUILT_IN_ABS: - case BUILT_IN_LABS: - case BUILT_IN_LLABS: - case BUILT_IN_IMAXABS: - return fold_builtin_abs (loc, arg0, type); +/* Given a location LOC, an interclass builtin function decl FNDECL + and its single argument ARG, return an folded expression computing + the same, or NULL_TREE if we either couldn't or didn't want to fold + (the latter happen if there's an RTL instruction available). */ - CASE_FLT_FN (BUILT_IN_CONJ): - if (validate_arg (arg0, COMPLEX_TYPE) - && TREE_CODE (TREE_TYPE (TREE_TYPE (arg0))) == REAL_TYPE) - return fold_build1_loc (loc, CONJ_EXPR, type, arg0); - break; +static tree +fold_builtin_interclass_mathfn (location_t loc, tree fndecl, tree arg) +{ + machine_mode mode; - CASE_FLT_FN (BUILT_IN_CREAL): - if (validate_arg (arg0, COMPLEX_TYPE) - && TREE_CODE (TREE_TYPE (TREE_TYPE (arg0))) == REAL_TYPE) - return non_lvalue_loc (loc, fold_build1_loc (loc, REALPART_EXPR, type, arg0)); - break; + if (!validate_arg (arg, REAL_TYPE)) + return NULL_TREE; - CASE_FLT_FN (BUILT_IN_CIMAG): - if (validate_arg (arg0, COMPLEX_TYPE) - && TREE_CODE (TREE_TYPE (TREE_TYPE (arg0))) == REAL_TYPE) - return non_lvalue_loc (loc, fold_build1_loc (loc, IMAGPART_EXPR, type, arg0)); - break; + if (interclass_mathfn_icode (arg, fndecl) != CODE_FOR_nothing) + return NULL_TREE; - CASE_FLT_FN (BUILT_IN_CARG): - return fold_builtin_carg (loc, arg0, type); + mode = TYPE_MODE (TREE_TYPE (arg)); - case BUILT_IN_ISASCII: - return fold_builtin_isascii (loc, arg0); + bool is_ibm_extended = MODE_COMPOSITE_P (mode); - case BUILT_IN_TOASCII: - return fold_builtin_toascii (loc, arg0); + /* If there is no optab, try generic code. */ + switch (DECL_FUNCTION_CODE (fndecl)) + { + tree result; - case BUILT_IN_ISDIGIT: - return fold_builtin_isdigit (loc, arg0); + CASE_FLT_FN (BUILT_IN_ISINF): + { + /* isinf(x) -> isgreater(fabs(x),DBL_MAX). */ + tree const isgr_fn = builtin_decl_explicit (BUILT_IN_ISGREATER); + tree type = TREE_TYPE (arg); + REAL_VALUE_TYPE r; + char buf[128]; + if (is_ibm_extended) + { + /* NaN and Inf are encoded in the high-order double value + only. The low-order value is not significant. */ + type = double_type_node; + mode = DFmode; + arg = fold_build1_loc (loc, NOP_EXPR, type, arg); + } + get_max_float (REAL_MODE_FORMAT (mode), buf, sizeof (buf), false); + real_from_string (&r, buf); + result = build_call_expr (isgr_fn, 2, + fold_build1_loc (loc, ABS_EXPR, type, arg), + build_real (type, r)); + return result; + } CASE_FLT_FN (BUILT_IN_FINITE): - case BUILT_IN_FINITED32: - case BUILT_IN_FINITED64: - case BUILT_IN_FINITED128: case BUILT_IN_ISFINITE: { - tree ret = fold_builtin_classify (loc, fndecl, arg0, BUILT_IN_ISFINITE); - if (ret) - return ret; - return fold_builtin_interclass_mathfn (loc, fndecl, arg0); - } + /* isfinite(x) -> islessequal(fabs(x),DBL_MAX). 
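
The interclass lowerings compare fabs (x) against the largest finite value of the mode (what get_max_float produces; DBL_MAX for double), and the quiet isgreater/islessequal comparisons keep NaN operands from raising FE_INVALID. A sketch of the resulting behaviour:

    #include <assert.h>
    #include <float.h>
    #include <math.h>

    int
    main (void)
    {
      assert (isgreater (fabs (-HUGE_VAL), DBL_MAX));   /* isinf */
      assert (!isgreater (fabs (DBL_MAX), DBL_MAX));
      assert (islessequal (fabs (123.0), DBL_MAX));     /* isfinite */
      assert (!islessequal (fabs (NAN), DBL_MAX));      /* NaN fails both */
      return 0;
    }
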
*/ + tree const isle_fn = builtin_decl_explicit (BUILT_IN_ISLESSEQUAL); + tree type = TREE_TYPE (arg); + REAL_VALUE_TYPE r; + char buf[128]; - CASE_FLT_FN (BUILT_IN_ISINF): - case BUILT_IN_ISINFD32: - case BUILT_IN_ISINFD64: - case BUILT_IN_ISINFD128: - { - tree ret = fold_builtin_classify (loc, fndecl, arg0, BUILT_IN_ISINF); - if (ret) - return ret; - return fold_builtin_interclass_mathfn (loc, fndecl, arg0); + if (is_ibm_extended) + { + /* NaN and Inf are encoded in the high-order double value + only. The low-order value is not significant. */ + type = double_type_node; + mode = DFmode; + arg = fold_build1_loc (loc, NOP_EXPR, type, arg); + } + get_max_float (REAL_MODE_FORMAT (mode), buf, sizeof (buf), false); + real_from_string (&r, buf); + result = build_call_expr (isle_fn, 2, + fold_build1_loc (loc, ABS_EXPR, type, arg), + build_real (type, r)); + /*result = fold_build2_loc (loc, UNGT_EXPR, + TREE_TYPE (TREE_TYPE (fndecl)), + fold_build1_loc (loc, ABS_EXPR, type, arg), + build_real (type, r)); + result = fold_build1_loc (loc, TRUTH_NOT_EXPR, + TREE_TYPE (TREE_TYPE (fndecl)), + result);*/ + return result; } - case BUILT_IN_ISNORMAL: - return fold_builtin_interclass_mathfn (loc, fndecl, arg0); - - case BUILT_IN_ISINF_SIGN: - return fold_builtin_classify (loc, fndecl, arg0, BUILT_IN_ISINF_SIGN); + { + /* isnormal(x) -> isgreaterequal(fabs(x),DBL_MIN) & + islessequal(fabs(x),DBL_MAX). */ + tree const isle_fn = builtin_decl_explicit (BUILT_IN_ISLESSEQUAL); + tree type = TREE_TYPE (arg); + tree orig_arg, max_exp, min_exp; + machine_mode orig_mode = mode; + REAL_VALUE_TYPE rmax, rmin; + char buf[128]; - CASE_FLT_FN (BUILT_IN_ISNAN): - case BUILT_IN_ISNAND32: - case BUILT_IN_ISNAND64: - case BUILT_IN_ISNAND128: - return fold_builtin_classify (loc, fndecl, arg0, BUILT_IN_ISNAN); + orig_arg = arg = builtin_save_expr (arg); + if (is_ibm_extended) + { + /* Use double to test the normal range of IBM extended + precision. Emin for IBM extended precision is + different to emin for IEEE double, being 53 higher + since the low double exponent is at least 53 lower + than the high double exponent. */ + type = double_type_node; + mode = DFmode; + arg = fold_build1_loc (loc, NOP_EXPR, type, arg); + } + arg = fold_build1_loc (loc, ABS_EXPR, type, arg); - case BUILT_IN_FREE: - if (integer_zerop (arg0)) - return build_empty_stmt (loc); - break; + get_max_float (REAL_MODE_FORMAT (mode), buf, sizeof (buf), false); + real_from_string (&rmax, buf); + sprintf (buf, "0x1p%d", REAL_MODE_FORMAT (orig_mode)->emin - 1); + real_from_string (&rmin, buf); + max_exp = build_real (type, rmax); + min_exp = build_real (type, rmin); + max_exp = build_call_expr (isle_fn, 2, arg, max_exp); + if (is_ibm_extended) + { + /* Testing the high end of the range is done just using + the high double, using the same test as isfinite(). + For the subnormal end of the range we first test the + high double, then if its magnitude is equal to the + limit of 0x1p-969, we test whether the low double is + non-zero and opposite sign to the high double. 
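
For the common, non-composite case the isnormal lowering is the two-sided magnitude test sketched below; the 0x1p(emin-1) bound is DBL_MIN for IEEE double. A minimal check:

    #include <assert.h>
    #include <float.h>
    #include <math.h>

    static int
    isnormal_folded (double x)
    {
      return isgreaterequal (fabs (x), DBL_MIN)
             && islessequal (fabs (x), DBL_MAX);
    }

    int
    main (void)
    {
      assert (isnormal_folded (1.0));
      assert (!isnormal_folded (0.0));
      assert (!isnormal_folded (DBL_MIN / 2));  /* subnormal */
      assert (!isnormal_folded (HUGE_VAL) && !isnormal_folded (NAN));
      return 0;
    }
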
*/ + tree const islt_fn = builtin_decl_explicit (BUILT_IN_ISLESS); + tree const isgt_fn = builtin_decl_explicit (BUILT_IN_ISGREATER); + tree gt_min = build_call_expr (isgt_fn, 2, arg, min_exp); + tree eq_min = fold_build2 (EQ_EXPR, integer_type_node, + arg, min_exp); + tree as_complex = build1 (VIEW_CONVERT_EXPR, + complex_double_type_node, orig_arg); + tree hi_dbl = build1 (REALPART_EXPR, type, as_complex); + tree lo_dbl = build1 (IMAGPART_EXPR, type, as_complex); + tree zero = build_real (type, dconst0); + tree hilt = build_call_expr (islt_fn, 2, hi_dbl, zero); + tree lolt = build_call_expr (islt_fn, 2, lo_dbl, zero); + tree logt = build_call_expr (isgt_fn, 2, lo_dbl, zero); + tree ok_lo = fold_build1 (TRUTH_NOT_EXPR, integer_type_node, + fold_build3 (COND_EXPR, + integer_type_node, + hilt, logt, lolt)); + eq_min = fold_build2 (TRUTH_ANDIF_EXPR, integer_type_node, + eq_min, ok_lo); + min_exp = fold_build2 (TRUTH_ORIF_EXPR, integer_type_node, + gt_min, eq_min); + } + else + { + tree const isge_fn + = builtin_decl_explicit (BUILT_IN_ISGREATEREQUAL); + min_exp = build_call_expr (isge_fn, 2, arg, min_exp); + } + result = fold_build2 (BIT_AND_EXPR, integer_type_node, + max_exp, min_exp); + return result; + } default: break; } return NULL_TREE; - } -/* Folds a call EXPR (which may be null) to built-in function FNDECL - with 2 arguments, ARG0 and ARG1. This function returns NULL_TREE - if no simplification was possible. */ +/* Fold a call to __builtin_isnan(), __builtin_isinf, __builtin_finite. + ARG is the argument for the call. */ static tree -fold_builtin_2 (location_t loc, tree expr, tree fndecl, tree arg0, tree arg1) +fold_builtin_classify (location_t loc, tree fndecl, tree arg, int builtin_index) { tree type = TREE_TYPE (TREE_TYPE (fndecl)); - enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl); - if (TREE_CODE (arg0) == ERROR_MARK - || TREE_CODE (arg1) == ERROR_MARK) + if (!validate_arg (arg, REAL_TYPE)) return NULL_TREE; - if (tree ret = fold_const_call (as_combined_fn (fcode), type, arg0, arg1)) - return ret; - - switch (fcode) + switch (builtin_index) { - CASE_FLT_FN_REENT (BUILT_IN_GAMMA): /* GAMMA_R */ - CASE_FLT_FN_REENT (BUILT_IN_LGAMMA): /* LGAMMA_R */ - if (validate_arg (arg0, REAL_TYPE) - && validate_arg (arg1, POINTER_TYPE)) - return do_mpfr_lgamma_r (arg0, arg1, type); - break; + case BUILT_IN_ISINF: + if (tree_expr_infinite_p (arg)) + return omit_one_operand_loc (loc, type, integer_one_node, arg); + if (!tree_expr_maybe_infinite_p (arg)) + return omit_one_operand_loc (loc, type, integer_zero_node, arg); + return NULL_TREE; - CASE_FLT_FN (BUILT_IN_FREXP): - return fold_builtin_frexp (loc, arg0, arg1, type); + case BUILT_IN_ISINF_SIGN: + { + /* isinf_sign(x) -> isinf(x) ? (signbit(x) ? -1 : 1) : 0 */ + /* In a boolean context, GCC will fold the inner COND_EXPR to + 1. So e.g. "if (isinf_sign(x))" would be folded to just + "if (isinf(x) ? 1 : 0)" which becomes "if (isinf(x))". 
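
The isinf_sign fold wires the isinf and signbit calls together exactly as the pseudo-source in the comment says. Written out in plain C:

    #include <assert.h>
    #include <math.h>

    static int
    isinf_sign_folded (double x)
    {
      return isinf (x) ? (signbit (x) ? -1 : 1) : 0;
    }

    int
    main (void)
    {
      assert (isinf_sign_folded (-HUGE_VAL) == -1);
      assert (isinf_sign_folded (HUGE_VAL) == 1);
      assert (isinf_sign_folded (0.0) == 0 && isinf_sign_folded (NAN) == 0);
      return 0;
    }
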
*/ + tree signbit_fn = builtin_decl_explicit (BUILT_IN_SIGNBIT); + tree isinf_fn = builtin_decl_explicit (BUILT_IN_ISINF); + tree tmp = NULL_TREE; - CASE_FLT_FN (BUILT_IN_MODF): - return fold_builtin_modf (loc, arg0, arg1, type); + arg = builtin_save_expr (arg); - case BUILT_IN_STRSPN: - return fold_builtin_strspn (loc, expr, arg0, arg1); + if (signbit_fn && isinf_fn) + { + tree signbit_call = build_call_expr_loc (loc, signbit_fn, 1, arg); + tree isinf_call = build_call_expr_loc (loc, isinf_fn, 1, arg); + + signbit_call = fold_build2_loc (loc, NE_EXPR, integer_type_node, + signbit_call, integer_zero_node); + isinf_call = fold_build2_loc (loc, NE_EXPR, integer_type_node, + isinf_call, integer_zero_node); + + tmp = fold_build3_loc (loc, COND_EXPR, integer_type_node, signbit_call, + integer_minus_one_node, integer_one_node); + tmp = fold_build3_loc (loc, COND_EXPR, integer_type_node, + isinf_call, tmp, + integer_zero_node); + } + + return tmp; + } + + case BUILT_IN_ISFINITE: + if (tree_expr_finite_p (arg)) + return omit_one_operand_loc (loc, type, integer_one_node, arg); + if (tree_expr_nan_p (arg) || tree_expr_infinite_p (arg)) + return omit_one_operand_loc (loc, type, integer_zero_node, arg); + return NULL_TREE; + + case BUILT_IN_ISNAN: + if (tree_expr_nan_p (arg)) + return omit_one_operand_loc (loc, type, integer_one_node, arg); + if (!tree_expr_maybe_nan_p (arg)) + return omit_one_operand_loc (loc, type, integer_zero_node, arg); + + { + bool is_ibm_extended = MODE_COMPOSITE_P (TYPE_MODE (TREE_TYPE (arg))); + if (is_ibm_extended) + { + /* NaN and Inf are encoded in the high-order double value + only. The low-order value is not significant. */ + arg = fold_build1_loc (loc, NOP_EXPR, double_type_node, arg); + } + } + arg = builtin_save_expr (arg); + return fold_build2_loc (loc, UNORDERED_EXPR, type, arg, arg); + + default: + gcc_unreachable (); + } +} - case BUILT_IN_STRCSPN: - return fold_builtin_strcspn (loc, expr, arg0, arg1); +/* Fold a call to __builtin_fpclassify(int, int, int, int, int, ...). + This builtin will generate code to return the appropriate floating + point classification depending on the value of the floating point + number passed in. The possible return values must be supplied as + int arguments to the call in the following order: FP_NAN, FP_INFINITE, + FP_NORMAL, FP_SUBNORMAL and FP_ZERO. The ellipses is for exactly + one floating point argument which is "type generic". */ - case BUILT_IN_STRPBRK: - return fold_builtin_strpbrk (loc, expr, arg0, arg1, type); +static tree +fold_builtin_fpclassify (location_t loc, tree *args, int nargs) +{ + tree fp_nan, fp_infinite, fp_normal, fp_subnormal, fp_zero, + arg, type, res, tmp; + machine_mode mode; + REAL_VALUE_TYPE r; + char buf[128]; - case BUILT_IN_EXPECT: - return fold_builtin_expect (loc, arg0, arg1, NULL_TREE, NULL_TREE); + /* Verify the required arguments in the original call. 
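
The ISNAN fallback emits an UNORDERED_EXPR of the argument with itself: a NaN is the only value that compares unordered to itself, and the quiet form avoids spurious exceptions on quiet NaNs. For example:

    #include <assert.h>
    #include <math.h>

    int
    main (void)
    {
      double qnan = NAN;
      assert (isunordered (qnan, qnan) && isnan (qnan));
      assert (!isunordered (1.0, 1.0) && !isnan (1.0));
      return 0;
    }
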
*/ + if (nargs != 6 + || !validate_arg (args[0], INTEGER_TYPE) + || !validate_arg (args[1], INTEGER_TYPE) + || !validate_arg (args[2], INTEGER_TYPE) + || !validate_arg (args[3], INTEGER_TYPE) + || !validate_arg (args[4], INTEGER_TYPE) + || !validate_arg (args[5], REAL_TYPE)) + return NULL_TREE; - case BUILT_IN_ISGREATER: - return fold_builtin_unordered_cmp (loc, fndecl, - arg0, arg1, UNLE_EXPR, LE_EXPR); - case BUILT_IN_ISGREATEREQUAL: - return fold_builtin_unordered_cmp (loc, fndecl, - arg0, arg1, UNLT_EXPR, LT_EXPR); - case BUILT_IN_ISLESS: - return fold_builtin_unordered_cmp (loc, fndecl, - arg0, arg1, UNGE_EXPR, GE_EXPR); - case BUILT_IN_ISLESSEQUAL: - return fold_builtin_unordered_cmp (loc, fndecl, - arg0, arg1, UNGT_EXPR, GT_EXPR); - case BUILT_IN_ISLESSGREATER: - return fold_builtin_unordered_cmp (loc, fndecl, - arg0, arg1, UNEQ_EXPR, EQ_EXPR); - case BUILT_IN_ISUNORDERED: - return fold_builtin_unordered_cmp (loc, fndecl, - arg0, arg1, UNORDERED_EXPR, - NOP_EXPR); + fp_nan = args[0]; + fp_infinite = args[1]; + fp_normal = args[2]; + fp_subnormal = args[3]; + fp_zero = args[4]; + arg = args[5]; + type = TREE_TYPE (arg); + mode = TYPE_MODE (type); + arg = builtin_save_expr (fold_build1_loc (loc, ABS_EXPR, type, arg)); - /* We do the folding for va_start in the expander. */ - case BUILT_IN_VA_START: - break; + /* fpclassify(x) -> + isnan(x) ? FP_NAN : + (fabs(x) == Inf ? FP_INFINITE : + (fabs(x) >= DBL_MIN ? FP_NORMAL : + (x == 0 ? FP_ZERO : FP_SUBNORMAL))). */ - case BUILT_IN_OBJECT_SIZE: - return fold_builtin_object_size (arg0, arg1); + tmp = fold_build2_loc (loc, EQ_EXPR, integer_type_node, arg, + build_real (type, dconst0)); + res = fold_build3_loc (loc, COND_EXPR, integer_type_node, + tmp, fp_zero, fp_subnormal); - case BUILT_IN_ATOMIC_ALWAYS_LOCK_FREE: - return fold_builtin_atomic_always_lock_free (arg0, arg1); + sprintf (buf, "0x1p%d", REAL_MODE_FORMAT (mode)->emin - 1); + real_from_string (&r, buf); + tmp = fold_build2_loc (loc, GE_EXPR, integer_type_node, + arg, build_real (type, r)); + res = fold_build3_loc (loc, COND_EXPR, integer_type_node, tmp, fp_normal, res); - case BUILT_IN_ATOMIC_IS_LOCK_FREE: - return fold_builtin_atomic_is_lock_free (arg0, arg1); + if (tree_expr_maybe_infinite_p (arg)) + { + real_inf (&r); + tmp = fold_build2_loc (loc, EQ_EXPR, integer_type_node, arg, + build_real (type, r)); + res = fold_build3_loc (loc, COND_EXPR, integer_type_node, tmp, + fp_infinite, res); + } - default: - break; + if (tree_expr_maybe_nan_p (arg)) + { + tmp = fold_build2_loc (loc, ORDERED_EXPR, integer_type_node, arg, arg); + res = fold_build3_loc (loc, COND_EXPR, integer_type_node, tmp, res, fp_nan); } - return NULL_TREE; + + return res; } -/* Fold a call to built-in function FNDECL with 3 arguments, ARG0, ARG1, - and ARG2. - This function returns NULL_TREE if no simplification was possible. */ +/* Fold a call to an unordered comparison function such as + __builtin_isgreater(). FNDECL is the FUNCTION_DECL for the function + being called and ARG0 and ARG1 are the arguments for the call. + UNORDERED_CODE and ORDERED_CODE are comparison codes that give + the opposite of the desired result. UNORDERED_CODE is used + for modes that can hold NaNs and ORDERED_CODE is used for + the rest. 
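
The fpclassify lowering builds the classification from the inside out: start with zero-versus-subnormal, then override with normal, infinite and finally NaN. A plain-C sketch of that chain, using ordinary comparisons rather than the exception-quiet tree codes the fold emits:

    #include <assert.h>
    #include <float.h>
    #include <math.h>

    static int
    fpclassify_folded (double x)
    {
      double ax = fabs (x);
      int r = x == 0.0 ? FP_ZERO : FP_SUBNORMAL;
      r = ax >= DBL_MIN ? FP_NORMAL : r;
      r = ax == HUGE_VAL ? FP_INFINITE : r;
      return x == x ? r : FP_NAN;   /* NaN compares unequal to itself */
    }

    int
    main (void)
    {
      const double vals[] = { 1.0, 0.0, DBL_MIN / 2, HUGE_VAL, NAN };
      for (unsigned i = 0; i < sizeof vals / sizeof vals[0]; i++)
        assert (fpclassify_folded (vals[i]) == fpclassify (vals[i]));
      return 0;
    }
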
*/ static tree -fold_builtin_3 (location_t loc, tree fndecl, - tree arg0, tree arg1, tree arg2) +fold_builtin_unordered_cmp (location_t loc, tree fndecl, tree arg0, tree arg1, + enum tree_code unordered_code, + enum tree_code ordered_code) { tree type = TREE_TYPE (TREE_TYPE (fndecl)); - enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl); + enum tree_code code; + tree type0, type1; + enum tree_code code0, code1; + tree cmp_type = NULL_TREE; - if (TREE_CODE (arg0) == ERROR_MARK - || TREE_CODE (arg1) == ERROR_MARK - || TREE_CODE (arg2) == ERROR_MARK) - return NULL_TREE; + type0 = TREE_TYPE (arg0); + type1 = TREE_TYPE (arg1); - if (tree ret = fold_const_call (as_combined_fn (fcode), type, - arg0, arg1, arg2)) - return ret; + code0 = TREE_CODE (type0); + code1 = TREE_CODE (type1); - switch (fcode) - { + if (code0 == REAL_TYPE && code1 == REAL_TYPE) + /* Choose the wider of two real types. */ + cmp_type = TYPE_PRECISION (type0) >= TYPE_PRECISION (type1) + ? type0 : type1; + else if (code0 == REAL_TYPE && code1 == INTEGER_TYPE) + cmp_type = type0; + else if (code0 == INTEGER_TYPE && code1 == REAL_TYPE) + cmp_type = type1; - CASE_FLT_FN (BUILT_IN_SINCOS): - return fold_builtin_sincos (loc, arg0, arg1, arg2); + arg0 = fold_convert_loc (loc, cmp_type, arg0); + arg1 = fold_convert_loc (loc, cmp_type, arg1); - CASE_FLT_FN (BUILT_IN_REMQUO): - if (validate_arg (arg0, REAL_TYPE) - && validate_arg (arg1, REAL_TYPE) - && validate_arg (arg2, POINTER_TYPE)) - return do_mpfr_remquo (arg0, arg1, arg2); - break; + if (unordered_code == UNORDERED_EXPR) + { + if (tree_expr_nan_p (arg0) || tree_expr_nan_p (arg1)) + return omit_two_operands_loc (loc, type, integer_one_node, arg0, arg1); + if (!tree_expr_maybe_nan_p (arg0) && !tree_expr_maybe_nan_p (arg1)) + return omit_two_operands_loc (loc, type, integer_zero_node, arg0, arg1); + return fold_build2_loc (loc, UNORDERED_EXPR, type, arg0, arg1); + } - case BUILT_IN_MEMCMP: - return fold_builtin_memcmp (loc, arg0, arg1, arg2); + code = (tree_expr_maybe_nan_p (arg0) || tree_expr_maybe_nan_p (arg1)) + ? unordered_code : ordered_code; + return fold_build1_loc (loc, TRUTH_NOT_EXPR, type, + fold_build2_loc (loc, code, type, arg0, arg1)); +} - case BUILT_IN_EXPECT: - return fold_builtin_expect (loc, arg0, arg1, arg2, NULL_TREE); +/* Fold __builtin_{,s,u}{add,sub,mul}{,l,ll}_overflow, either into normal + arithmetics if it can never overflow, or into internal functions that + return both result of arithmetics and overflowed boolean flag in + a complex integer result, or some other check for overflow. + Similarly fold __builtin_{add,sub,mul}_overflow_p to just the overflow + checking part of that. */ - case BUILT_IN_EXPECT_WITH_PROBABILITY: - return fold_builtin_expect (loc, arg0, arg1, NULL_TREE, arg2); +static tree +fold_builtin_arith_overflow (location_t loc, enum built_in_function fcode, + tree arg0, tree arg1, tree arg2) +{ + enum internal_fn ifn = IFN_LAST; + /* The code of the expression corresponding to the built-in. 
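
The unordered_cmp folding negates the opposite comparison code: isgreater (x, y) becomes ! (x UNLE y) when a NaN is possible, and ! (x <= y) otherwise. The UNLE ("unordered or less-equal") code matters because negating a plain LE would wrongly return true for NaN operands:

    #include <assert.h>
    #include <math.h>

    int
    main (void)
    {
      /* Ordered operands: isgreater agrees with '>'.  */
      assert (isgreater (2.0, 1.0) && !isgreater (1.0, 2.0));
      /* NaN operand: "unordered or less-equal" holds, so its negation,
         and therefore isgreater, is false.  */
      assert (!isgreater ((double) NAN, 1.0));
      return 0;
    }
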
*/ + enum tree_code opcode = ERROR_MARK; + bool ovf_only = false; - case BUILT_IN_ADD_OVERFLOW: - case BUILT_IN_SUB_OVERFLOW: - case BUILT_IN_MUL_OVERFLOW: + switch (fcode) + { case BUILT_IN_ADD_OVERFLOW_P: - case BUILT_IN_SUB_OVERFLOW_P: - case BUILT_IN_MUL_OVERFLOW_P: + ovf_only = true; + /* FALLTHRU */ + case BUILT_IN_ADD_OVERFLOW: case BUILT_IN_SADD_OVERFLOW: case BUILT_IN_SADDL_OVERFLOW: case BUILT_IN_SADDLL_OVERFLOW: - case BUILT_IN_SSUB_OVERFLOW: - case BUILT_IN_SSUBL_OVERFLOW: - case BUILT_IN_SSUBLL_OVERFLOW: - case BUILT_IN_SMUL_OVERFLOW: - case BUILT_IN_SMULL_OVERFLOW: - case BUILT_IN_SMULLL_OVERFLOW: case BUILT_IN_UADD_OVERFLOW: case BUILT_IN_UADDL_OVERFLOW: case BUILT_IN_UADDLL_OVERFLOW: + opcode = PLUS_EXPR; + ifn = IFN_ADD_OVERFLOW; + break; + case BUILT_IN_SUB_OVERFLOW_P: + ovf_only = true; + /* FALLTHRU */ + case BUILT_IN_SUB_OVERFLOW: + case BUILT_IN_SSUB_OVERFLOW: + case BUILT_IN_SSUBL_OVERFLOW: + case BUILT_IN_SSUBLL_OVERFLOW: case BUILT_IN_USUB_OVERFLOW: case BUILT_IN_USUBL_OVERFLOW: case BUILT_IN_USUBLL_OVERFLOW: + opcode = MINUS_EXPR; + ifn = IFN_SUB_OVERFLOW; + break; + case BUILT_IN_MUL_OVERFLOW_P: + ovf_only = true; + /* FALLTHRU */ + case BUILT_IN_MUL_OVERFLOW: + case BUILT_IN_SMUL_OVERFLOW: + case BUILT_IN_SMULL_OVERFLOW: + case BUILT_IN_SMULLL_OVERFLOW: case BUILT_IN_UMUL_OVERFLOW: case BUILT_IN_UMULL_OVERFLOW: case BUILT_IN_UMULLL_OVERFLOW: - return fold_builtin_arith_overflow (loc, fcode, arg0, arg1, arg2); - - default: + opcode = MULT_EXPR; + ifn = IFN_MUL_OVERFLOW; break; + default: + gcc_unreachable (); + } + + /* For the "generic" overloads, the first two arguments can have different + types and the last argument determines the target type to use to check + for overflow. The arguments of the other overloads all have the same + type. */ + tree type = ovf_only ? TREE_TYPE (arg2) : TREE_TYPE (TREE_TYPE (arg2)); + + /* For the __builtin_{add,sub,mul}_overflow_p builtins, when the first two + arguments are constant, attempt to fold the built-in call into a constant + expression indicating whether or not it detected an overflow. */ + if (ovf_only + && TREE_CODE (arg0) == INTEGER_CST + && TREE_CODE (arg1) == INTEGER_CST) + /* Perform the computation in the target type and check for overflow. */ + return omit_one_operand_loc (loc, boolean_type_node, + arith_overflowed_p (opcode, type, arg0, arg1) + ? boolean_true_node : boolean_false_node, + arg2); + + tree intres, ovfres; + if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST) + { + intres = fold_binary_loc (loc, opcode, type, + fold_convert_loc (loc, type, arg0), + fold_convert_loc (loc, type, arg1)); + if (TREE_OVERFLOW (intres)) + intres = drop_tree_overflow (intres); + ovfres = (arith_overflowed_p (opcode, type, arg0, arg1) + ? 
boolean_true_node : boolean_false_node); } - return NULL_TREE; + else + { + tree ctype = build_complex_type (type); + tree call = build_call_expr_internal_loc (loc, ifn, ctype, 2, + arg0, arg1); + tree tgt = save_expr (call); + intres = build1_loc (loc, REALPART_EXPR, type, tgt); + ovfres = build1_loc (loc, IMAGPART_EXPR, type, tgt); + ovfres = fold_convert_loc (loc, boolean_type_node, ovfres); + } + + if (ovf_only) + return omit_one_operand_loc (loc, boolean_type_node, ovfres, arg2); + + tree mem_arg2 = build_fold_indirect_ref_loc (loc, arg2); + tree store + = fold_build2_loc (loc, MODIFY_EXPR, void_type_node, mem_arg2, intres); + return build2_loc (loc, COMPOUND_EXPR, boolean_type_node, store, ovfres); } -/* Folds a call EXPR (which may be null) to built-in function FNDECL. - ARGS is an array of NARGS arguments. IGNORE is true if the result - of the function call is ignored. This function returns NULL_TREE - if no simplification was possible. */ +/* Fold a call to __builtin_FILE to a constant string. */ -static tree -fold_builtin_n (location_t loc, tree expr, tree fndecl, tree *args, - int nargs, bool) +static inline tree +fold_builtin_FILE (location_t loc) { - tree ret = NULL_TREE; - - switch (nargs) - { - case 0: - ret = fold_builtin_0 (loc, fndecl); - break; - case 1: - ret = fold_builtin_1 (loc, expr, fndecl, args[0]); - break; - case 2: - ret = fold_builtin_2 (loc, expr, fndecl, args[0], args[1]); - break; - case 3: - ret = fold_builtin_3 (loc, fndecl, args[0], args[1], args[2]); - break; - default: - ret = fold_builtin_varargs (loc, fndecl, args, nargs); - break; - } - if (ret) + if (const char *fname = LOCATION_FILE (loc)) { - ret = build1 (NOP_EXPR, TREE_TYPE (ret), ret); - SET_EXPR_LOCATION (ret, loc); - return ret; + /* The documentation says this builtin is equivalent to the preprocessor + __FILE__ macro so it appears appropriate to use the same file prefix + mappings. */ + fname = remap_macro_filename (fname); + return build_string_literal (strlen (fname) + 1, fname); } - return NULL_TREE; + + return build_string_literal (1, ""); } -/* Construct a new CALL_EXPR to FNDECL using the tail of the argument - list ARGS along with N new arguments in NEWARGS. SKIP is the number - of arguments in ARGS to be omitted. OLDNARGS is the number of - elements in ARGS. */ +/* Fold a call to __builtin_FUNCTION to a constant string. */ -static tree -rewrite_call_expr_valist (location_t loc, int oldnargs, tree *args, - int skip, tree fndecl, int n, va_list newargs) +static inline tree +fold_builtin_FUNCTION () { - int nargs = oldnargs - skip + n; - tree *buffer; - - if (n > 0) - { - int i, j; + const char *name = ""; - buffer = XALLOCAVEC (tree, nargs); - for (i = 0; i < n; i++) - buffer[i] = va_arg (newargs, tree); - for (j = skip; j < oldnargs; j++, i++) - buffer[i] = args[j]; - } - else - buffer = args + skip; + if (current_function_decl) + name = lang_hooks.decl_printable_name (current_function_decl, 0); - return build_call_expr_loc_array (loc, fndecl, nargs, buffer); + return build_string_literal (strlen (name) + 1, name); } -/* Return true if FNDECL shouldn't be folded right now. - If a built-in function has an inline attribute always_inline - wrapper, defer folding it after always_inline functions have - been inlined, otherwise e.g. -D_FORTIFY_SOURCE checking - might not be performed. */ +/* Fold a call to __builtin_LINE to an integer constant. 
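
At the source level the complex-typed internal call stays invisible: __builtin_*_overflow returns the flag and stores the wrapped result, and the _p variants with constant operands fold to a constant exactly as above. Usage:

    #include <assert.h>
    #include <limits.h>

    int
    main (void)
    {
      int r;
      assert (!__builtin_add_overflow (1, 2, &r) && r == 3);
      assert (__builtin_add_overflow (INT_MAX, 1, &r));  /* flag set */
      /* The third argument of the _p form only supplies the type to
         check against; its value is ignored.  */
      assert (__builtin_add_overflow_p (INT_MAX, 1, (int) 0));
      return 0;
    }
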
*/ -bool -avoid_folding_inline_builtin (tree fndecl) +static inline tree +fold_builtin_LINE (location_t loc, tree type) { - return (DECL_DECLARED_INLINE_P (fndecl) - && DECL_DISREGARD_INLINE_LIMITS (fndecl) - && cfun - && !cfun->always_inline_functions_inlined - && lookup_attribute ("always_inline", DECL_ATTRIBUTES (fndecl))); + return build_int_cst (type, LOCATION_LINE (loc)); } -/* A wrapper function for builtin folding that prevents warnings for - "statement without effect" and the like, caused by removing the - call node earlier than the warning is generated. */ +/* Fold a call to built-in function FNDECL with 0 arguments. + This function returns NULL_TREE if no simplification was possible. */ -tree -fold_call_expr (location_t loc, tree exp, bool ignore) +static tree +fold_builtin_0 (location_t loc, tree fndecl) { - tree ret = NULL_TREE; - tree fndecl = get_callee_fndecl (exp); - if (fndecl && fndecl_built_in_p (fndecl) - /* If CALL_EXPR_VA_ARG_PACK is set, the arguments aren't finalized - yet. Defer folding until we see all the arguments - (after inlining). */ - && !CALL_EXPR_VA_ARG_PACK (exp)) + tree type = TREE_TYPE (TREE_TYPE (fndecl)); + enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl); + switch (fcode) { - int nargs = call_expr_nargs (exp); + case BUILT_IN_FILE: + return fold_builtin_FILE (loc); - /* Before gimplification CALL_EXPR_VA_ARG_PACK is not set, but - instead last argument is __builtin_va_arg_pack (). Defer folding - even in that case, until arguments are finalized. */ - if (nargs && TREE_CODE (CALL_EXPR_ARG (exp, nargs - 1)) == CALL_EXPR) - { - tree fndecl2 = get_callee_fndecl (CALL_EXPR_ARG (exp, nargs - 1)); - if (fndecl2 && fndecl_built_in_p (fndecl2, BUILT_IN_VA_ARG_PACK)) - return NULL_TREE; - } + case BUILT_IN_FUNCTION: + return fold_builtin_FUNCTION (); - if (avoid_folding_inline_builtin (fndecl)) - return NULL_TREE; + case BUILT_IN_LINE: + return fold_builtin_LINE (loc, type); - if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD) - return targetm.fold_builtin (fndecl, call_expr_nargs (exp), - CALL_EXPR_ARGP (exp), ignore); - else - { - tree *args = CALL_EXPR_ARGP (exp); - ret = fold_builtin_n (loc, exp, fndecl, args, nargs, ignore); - if (ret) - return ret; - } - } - return NULL_TREE; -} + CASE_FLT_FN (BUILT_IN_INF): + CASE_FLT_FN_FLOATN_NX (BUILT_IN_INF): + case BUILT_IN_INFD32: + case BUILT_IN_INFD64: + case BUILT_IN_INFD128: + return fold_builtin_inf (loc, type, true); -/* Fold a CALL_EXPR with type TYPE with FN as the function expression. - N arguments are passed in the array ARGARRAY. Return a folded - expression or NULL_TREE if no simplification was possible. */ + CASE_FLT_FN (BUILT_IN_HUGE_VAL): + CASE_FLT_FN_FLOATN_NX (BUILT_IN_HUGE_VAL): + return fold_builtin_inf (loc, type, false); -tree -fold_builtin_call_array (location_t loc, tree, - tree fn, - int n, - tree *argarray) -{ - if (TREE_CODE (fn) != ADDR_EXPR) - return NULL_TREE; + case BUILT_IN_CLASSIFY_TYPE: + return fold_builtin_classify_type (NULL_TREE); - tree fndecl = TREE_OPERAND (fn, 0); - if (TREE_CODE (fndecl) == FUNCTION_DECL - && fndecl_built_in_p (fndecl)) - { - /* If last argument is __builtin_va_arg_pack (), arguments to this - function are not finalized yet. Defer folding until they are. 
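
The three location builtins fold to constants at the call site; __builtin_FILE honours the same prefix maps as the __FILE__ macro, so the pair normally agree (assuming no unusual -ffile-prefix-map setup):

    #include <assert.h>
    #include <string.h>

    int
    main (void)
    {
      assert (strcmp (__builtin_FUNCTION (), "main") == 0);
      assert (__builtin_LINE () == __LINE__);
      assert (strcmp (__builtin_FILE (), __FILE__) == 0);
      return 0;
    }
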
*/ - if (n && TREE_CODE (argarray[n - 1]) == CALL_EXPR) - { - tree fndecl2 = get_callee_fndecl (argarray[n - 1]); - if (fndecl2 && fndecl_built_in_p (fndecl2, BUILT_IN_VA_ARG_PACK)) - return NULL_TREE; - } - if (avoid_folding_inline_builtin (fndecl)) - return NULL_TREE; - if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD) - return targetm.fold_builtin (fndecl, n, argarray, false); - else - return fold_builtin_n (loc, NULL_TREE, fndecl, argarray, n, false); + default: + break; } - return NULL_TREE; } -/* Construct a new CALL_EXPR using the tail of the argument list of EXP - along with N new arguments specified as the "..." parameters. SKIP - is the number of arguments in EXP to be omitted. This function is used - to do varargs-to-varargs transformations. */ +/* Fold a call to built-in function FNDECL with 1 argument, ARG0. + This function returns NULL_TREE if no simplification was possible. */ static tree -rewrite_call_expr (location_t loc, tree exp, int skip, tree fndecl, int n, ...) +fold_builtin_1 (location_t loc, tree expr, tree fndecl, tree arg0) { - va_list ap; - tree t; + tree type = TREE_TYPE (TREE_TYPE (fndecl)); + enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl); - va_start (ap, n); - t = rewrite_call_expr_valist (loc, call_expr_nargs (exp), - CALL_EXPR_ARGP (exp), skip, fndecl, n, ap); - va_end (ap); + if (TREE_CODE (arg0) == ERROR_MARK) + return NULL_TREE; - return t; -} + if (tree ret = fold_const_call (as_combined_fn (fcode), type, arg0)) + return ret; -/* Validate a single argument ARG against a tree code CODE representing - a type. Return true when argument is valid. */ + switch (fcode) + { + case BUILT_IN_CONSTANT_P: + { + tree val = fold_builtin_constant_p (arg0); -static bool -validate_arg (const_tree arg, enum tree_code code) -{ - if (!arg) - return false; - else if (code == POINTER_TYPE) - return POINTER_TYPE_P (TREE_TYPE (arg)); - else if (code == INTEGER_TYPE) - return INTEGRAL_TYPE_P (TREE_TYPE (arg)); - return code == TREE_CODE (TREE_TYPE (arg)); -} + /* Gimplification will pull the CALL_EXPR for the builtin out of + an if condition. When not optimizing, we'll not CSE it back. + To avoid link error types of regressions, return false now. */ + if (!val && !optimize) + val = integer_zero_node; + + return val; + } + + case BUILT_IN_CLASSIFY_TYPE: + return fold_builtin_classify_type (arg0); + + case BUILT_IN_STRLEN: + return fold_builtin_strlen (loc, expr, type, arg0); + + CASE_FLT_FN (BUILT_IN_FABS): + CASE_FLT_FN_FLOATN_NX (BUILT_IN_FABS): + case BUILT_IN_FABSD32: + case BUILT_IN_FABSD64: + case BUILT_IN_FABSD128: + return fold_builtin_fabs (loc, arg0, type); -/* This function validates the types of a function call argument list - against a specified list of tree_codes. If the last specifier is a 0, - that represents an ellipses, otherwise the last specifier must be a - VOID_TYPE. + case BUILT_IN_ABS: + case BUILT_IN_LABS: + case BUILT_IN_LLABS: + case BUILT_IN_IMAXABS: + return fold_builtin_abs (loc, arg0, type); - This is the GIMPLE version of validate_arglist. Eventually we want to - completely convert builtins.c to work from GIMPLEs and the tree based - validate_arglist will then be removed. */ + CASE_FLT_FN (BUILT_IN_CONJ): + if (validate_arg (arg0, COMPLEX_TYPE) + && TREE_CODE (TREE_TYPE (TREE_TYPE (arg0))) == REAL_TYPE) + return fold_build1_loc (loc, CONJ_EXPR, type, arg0); + break; -bool -validate_gimple_arglist (const gcall *call, ...) 
-{ - enum tree_code code; - bool res = 0; - va_list ap; - const_tree arg; - size_t i; + CASE_FLT_FN (BUILT_IN_CREAL): + if (validate_arg (arg0, COMPLEX_TYPE) + && TREE_CODE (TREE_TYPE (TREE_TYPE (arg0))) == REAL_TYPE) + return non_lvalue_loc (loc, fold_build1_loc (loc, REALPART_EXPR, type, arg0)); + break; - va_start (ap, call); - i = 0; + CASE_FLT_FN (BUILT_IN_CIMAG): + if (validate_arg (arg0, COMPLEX_TYPE) + && TREE_CODE (TREE_TYPE (TREE_TYPE (arg0))) == REAL_TYPE) + return non_lvalue_loc (loc, fold_build1_loc (loc, IMAGPART_EXPR, type, arg0)); + break; - do - { - code = (enum tree_code) va_arg (ap, int); - switch (code) - { - case 0: - /* This signifies an ellipses, any further arguments are all ok. */ - res = true; - goto end; - case VOID_TYPE: - /* This signifies an endlink, if no arguments remain, return - true, otherwise return false. */ - res = (i == gimple_call_num_args (call)); - goto end; - default: - /* If no parameters remain or the parameter's code does not - match the specified code, return false. Otherwise continue - checking any remaining arguments. */ - arg = gimple_call_arg (call, i++); - if (!validate_arg (arg, code)) - goto end; - break; - } - } - while (1); + CASE_FLT_FN (BUILT_IN_CARG): + return fold_builtin_carg (loc, arg0, type); - /* We need gotos here since we can only have one VA_CLOSE in a - function. */ - end: ; - va_end (ap); + case BUILT_IN_ISASCII: + return fold_builtin_isascii (loc, arg0); - return res; -} + case BUILT_IN_TOASCII: + return fold_builtin_toascii (loc, arg0); -/* Default target-specific builtin expander that does nothing. */ + case BUILT_IN_ISDIGIT: + return fold_builtin_isdigit (loc, arg0); -rtx -default_expand_builtin (tree exp ATTRIBUTE_UNUSED, - rtx target ATTRIBUTE_UNUSED, - rtx subtarget ATTRIBUTE_UNUSED, - machine_mode mode ATTRIBUTE_UNUSED, - int ignore ATTRIBUTE_UNUSED) -{ - return NULL_RTX; -} + CASE_FLT_FN (BUILT_IN_FINITE): + case BUILT_IN_FINITED32: + case BUILT_IN_FINITED64: + case BUILT_IN_FINITED128: + case BUILT_IN_ISFINITE: + { + tree ret = fold_builtin_classify (loc, fndecl, arg0, BUILT_IN_ISFINITE); + if (ret) + return ret; + return fold_builtin_interclass_mathfn (loc, fndecl, arg0); + } -/* Returns true is EXP represents data that would potentially reside - in a readonly section. */ + CASE_FLT_FN (BUILT_IN_ISINF): + case BUILT_IN_ISINFD32: + case BUILT_IN_ISINFD64: + case BUILT_IN_ISINFD128: + { + tree ret = fold_builtin_classify (loc, fndecl, arg0, BUILT_IN_ISINF); + if (ret) + return ret; + return fold_builtin_interclass_mathfn (loc, fndecl, arg0); + } -bool -readonly_data_expr (tree exp) -{ - STRIP_NOPS (exp); + case BUILT_IN_ISNORMAL: + return fold_builtin_interclass_mathfn (loc, fndecl, arg0); - if (TREE_CODE (exp) != ADDR_EXPR) - return false; + case BUILT_IN_ISINF_SIGN: + return fold_builtin_classify (loc, fndecl, arg0, BUILT_IN_ISINF_SIGN); - exp = get_base_address (TREE_OPERAND (exp, 0)); - if (!exp) - return false; + CASE_FLT_FN (BUILT_IN_ISNAN): + case BUILT_IN_ISNAND32: + case BUILT_IN_ISNAND64: + case BUILT_IN_ISNAND128: + return fold_builtin_classify (loc, fndecl, arg0, BUILT_IN_ISNAN); - /* Make sure we call decl_readonly_section only for trees it - can handle (since it returns true for everything it doesn't - understand). 
*/ - if (TREE_CODE (exp) == STRING_CST - || TREE_CODE (exp) == CONSTRUCTOR - || (VAR_P (exp) && TREE_STATIC (exp))) - return decl_readonly_section (exp, 0); - else - return false; -} + case BUILT_IN_FREE: + if (integer_zerop (arg0)) + return build_empty_stmt (loc); + break; -/* Simplify a call to the strpbrk builtin. S1 and S2 are the arguments - to the call, and TYPE is its return type. + default: + break; + } - Return NULL_TREE if no simplification was possible, otherwise return the - simplified form of the call as a tree. + return NULL_TREE; - The simplified form may be a constant or other expression which - computes the same value, but in a more efficient manner (including - calls to other builtin functions). +} - The call may contain arguments which need to be evaluated, but - which are not useful to determine the result of the call. In - this case we return a chain of COMPOUND_EXPRs. The LHS of each - COMPOUND_EXPR will be an argument which must be evaluated. - COMPOUND_EXPRs are chained through their RHS. The RHS of the last - COMPOUND_EXPR in the chain will contain the tree for the simplified - form of the builtin function call. */ +/* Folds a call EXPR (which may be null) to built-in function FNDECL + with 2 arguments, ARG0 and ARG1. This function returns NULL_TREE + if no simplification was possible. */ static tree -fold_builtin_strpbrk (location_t loc, tree, tree s1, tree s2, tree type) +fold_builtin_2 (location_t loc, tree expr, tree fndecl, tree arg0, tree arg1) { - if (!validate_arg (s1, POINTER_TYPE) - || !validate_arg (s2, POINTER_TYPE)) - return NULL_TREE; - - tree fn; - const char *p1, *p2; + tree type = TREE_TYPE (TREE_TYPE (fndecl)); + enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl); - p2 = c_getstr (s2); - if (p2 == NULL) + if (TREE_CODE (arg0) == ERROR_MARK + || TREE_CODE (arg1) == ERROR_MARK) return NULL_TREE; - p1 = c_getstr (s1); - if (p1 != NULL) + if (tree ret = fold_const_call (as_combined_fn (fcode), type, arg0, arg1)) + return ret; + + switch (fcode) { - const char *r = strpbrk (p1, p2); - tree tem; + CASE_FLT_FN_REENT (BUILT_IN_GAMMA): /* GAMMA_R */ + CASE_FLT_FN_REENT (BUILT_IN_LGAMMA): /* LGAMMA_R */ + if (validate_arg (arg0, REAL_TYPE) + && validate_arg (arg1, POINTER_TYPE)) + return do_mpfr_lgamma_r (arg0, arg1, type); + break; - if (r == NULL) - return build_int_cst (TREE_TYPE (s1), 0); + CASE_FLT_FN (BUILT_IN_FREXP): + return fold_builtin_frexp (loc, arg0, arg1, type); - /* Return an offset into the constant string argument. */ - tem = fold_build_pointer_plus_hwi_loc (loc, s1, r - p1); - return fold_convert_loc (loc, type, tem); - } + CASE_FLT_FN (BUILT_IN_MODF): + return fold_builtin_modf (loc, arg0, arg1, type); - if (p2[0] == '\0') - /* strpbrk(x, "") == NULL. - Evaluate and ignore s1 in case it had side-effects. */ - return omit_one_operand_loc (loc, type, integer_zero_node, s1); + case BUILT_IN_STRSPN: + return fold_builtin_strspn (loc, expr, arg0, arg1); - if (p2[1] != '\0') - return NULL_TREE; /* Really call strpbrk. */ + case BUILT_IN_STRCSPN: + return fold_builtin_strcspn (loc, expr, arg0, arg1); - fn = builtin_decl_implicit (BUILT_IN_STRCHR); - if (!fn) - return NULL_TREE; + case BUILT_IN_STRPBRK: + return fold_builtin_strpbrk (loc, expr, arg0, arg1, type); - /* New argument list transforming strpbrk(s1, s2) to - strchr(s1, s2[0]). 
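
The strpbrk rewrites mirror the library semantics: an empty search set finds nothing, and a one-character set degenerates to strchr. For example:

    #include <assert.h>
    #include <string.h>

    int
    main (void)
    {
      const char *s = "hello world";
      assert (strpbrk (s, "o") == strchr (s, 'o'));
      assert (strpbrk (s, "") == NULL);
      assert (strpbrk (s, "z") == NULL && strchr (s, 'z') == NULL);
      return 0;
    }
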
*/ - return build_call_expr_loc (loc, fn, 2, s1, - build_int_cst (integer_type_node, p2[0])); -} + case BUILT_IN_EXPECT: + return fold_builtin_expect (loc, arg0, arg1, NULL_TREE, NULL_TREE); + + case BUILT_IN_ISGREATER: + return fold_builtin_unordered_cmp (loc, fndecl, + arg0, arg1, UNLE_EXPR, LE_EXPR); + case BUILT_IN_ISGREATEREQUAL: + return fold_builtin_unordered_cmp (loc, fndecl, + arg0, arg1, UNLT_EXPR, LT_EXPR); + case BUILT_IN_ISLESS: + return fold_builtin_unordered_cmp (loc, fndecl, + arg0, arg1, UNGE_EXPR, GE_EXPR); + case BUILT_IN_ISLESSEQUAL: + return fold_builtin_unordered_cmp (loc, fndecl, + arg0, arg1, UNGT_EXPR, GT_EXPR); + case BUILT_IN_ISLESSGREATER: + return fold_builtin_unordered_cmp (loc, fndecl, + arg0, arg1, UNEQ_EXPR, EQ_EXPR); + case BUILT_IN_ISUNORDERED: + return fold_builtin_unordered_cmp (loc, fndecl, + arg0, arg1, UNORDERED_EXPR, + NOP_EXPR); + + /* We do the folding for va_start in the expander. */ + case BUILT_IN_VA_START: + break; + + case BUILT_IN_OBJECT_SIZE: + return fold_builtin_object_size (arg0, arg1); -/* Simplify a call to the strspn builtin. S1 and S2 are the arguments - to the call. + case BUILT_IN_ATOMIC_ALWAYS_LOCK_FREE: + return fold_builtin_atomic_always_lock_free (arg0, arg1); - Return NULL_TREE if no simplification was possible, otherwise return the - simplified form of the call as a tree. + case BUILT_IN_ATOMIC_IS_LOCK_FREE: + return fold_builtin_atomic_is_lock_free (arg0, arg1); - The simplified form may be a constant or other expression which - computes the same value, but in a more efficient manner (including - calls to other builtin functions). + default: + break; + } + return NULL_TREE; +} - The call may contain arguments which need to be evaluated, but - which are not useful to determine the result of the call. In - this case we return a chain of COMPOUND_EXPRs. The LHS of each - COMPOUND_EXPR will be an argument which must be evaluated. - COMPOUND_EXPRs are chained through their RHS. The RHS of the last - COMPOUND_EXPR in the chain will contain the tree for the simplified - form of the builtin function call. */ +/* Fold a call to built-in function FNDECL with 3 arguments, ARG0, ARG1, + and ARG2. + This function returns NULL_TREE if no simplification was possible. */ static tree -fold_builtin_strspn (location_t loc, tree expr, tree s1, tree s2) +fold_builtin_3 (location_t loc, tree fndecl, + tree arg0, tree arg1, tree arg2) { - if (!validate_arg (s1, POINTER_TYPE) - || !validate_arg (s2, POINTER_TYPE)) - return NULL_TREE; + tree type = TREE_TYPE (TREE_TYPE (fndecl)); + enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl); - if (!check_nul_terminated_array (expr, s1) - || !check_nul_terminated_array (expr, s2)) + if (TREE_CODE (arg0) == ERROR_MARK + || TREE_CODE (arg1) == ERROR_MARK + || TREE_CODE (arg2) == ERROR_MARK) return NULL_TREE; - const char *p1 = c_getstr (s1), *p2 = c_getstr (s2); + if (tree ret = fold_const_call (as_combined_fn (fcode), type, + arg0, arg1, arg2)) + return ret; - /* If either argument is "", return NULL_TREE. */ - if ((p1 && *p1 == '\0') || (p2 && *p2 == '\0')) - /* Evaluate and ignore both arguments in case either one has - side-effects. */ - return omit_two_operands_loc (loc, size_type_node, size_zero_node, - s1, s2); - return NULL_TREE; -} + switch (fcode) + { -/* Simplify a call to the strcspn builtin. S1 and S2 are the arguments - to the call. 
+ CASE_FLT_FN (BUILT_IN_SINCOS): + return fold_builtin_sincos (loc, arg0, arg1, arg2); - Return NULL_TREE if no simplification was possible, otherwise return the - simplified form of the call as a tree. + CASE_FLT_FN (BUILT_IN_REMQUO): + if (validate_arg (arg0, REAL_TYPE) + && validate_arg (arg1, REAL_TYPE) + && validate_arg (arg2, POINTER_TYPE)) + return do_mpfr_remquo (arg0, arg1, arg2); + break; - The simplified form may be a constant or other expression which - computes the same value, but in a more efficient manner (including - calls to other builtin functions). + case BUILT_IN_MEMCMP: + return fold_builtin_memcmp (loc, arg0, arg1, arg2); - The call may contain arguments which need to be evaluated, but - which are not useful to determine the result of the call. In - this case we return a chain of COMPOUND_EXPRs. The LHS of each - COMPOUND_EXPR will be an argument which must be evaluated. - COMPOUND_EXPRs are chained through their RHS. The RHS of the last - COMPOUND_EXPR in the chain will contain the tree for the simplified - form of the builtin function call. */ + case BUILT_IN_EXPECT: + return fold_builtin_expect (loc, arg0, arg1, arg2, NULL_TREE); -static tree -fold_builtin_strcspn (location_t loc, tree expr, tree s1, tree s2) -{ - if (!validate_arg (s1, POINTER_TYPE) - || !validate_arg (s2, POINTER_TYPE)) - return NULL_TREE; + case BUILT_IN_EXPECT_WITH_PROBABILITY: + return fold_builtin_expect (loc, arg0, arg1, NULL_TREE, arg2); - if (!check_nul_terminated_array (expr, s1) - || !check_nul_terminated_array (expr, s2)) - return NULL_TREE; + case BUILT_IN_ADD_OVERFLOW: + case BUILT_IN_SUB_OVERFLOW: + case BUILT_IN_MUL_OVERFLOW: + case BUILT_IN_ADD_OVERFLOW_P: + case BUILT_IN_SUB_OVERFLOW_P: + case BUILT_IN_MUL_OVERFLOW_P: + case BUILT_IN_SADD_OVERFLOW: + case BUILT_IN_SADDL_OVERFLOW: + case BUILT_IN_SADDLL_OVERFLOW: + case BUILT_IN_SSUB_OVERFLOW: + case BUILT_IN_SSUBL_OVERFLOW: + case BUILT_IN_SSUBLL_OVERFLOW: + case BUILT_IN_SMUL_OVERFLOW: + case BUILT_IN_SMULL_OVERFLOW: + case BUILT_IN_SMULLL_OVERFLOW: + case BUILT_IN_UADD_OVERFLOW: + case BUILT_IN_UADDL_OVERFLOW: + case BUILT_IN_UADDLL_OVERFLOW: + case BUILT_IN_USUB_OVERFLOW: + case BUILT_IN_USUBL_OVERFLOW: + case BUILT_IN_USUBLL_OVERFLOW: + case BUILT_IN_UMUL_OVERFLOW: + case BUILT_IN_UMULL_OVERFLOW: + case BUILT_IN_UMULLL_OVERFLOW: + return fold_builtin_arith_overflow (loc, fcode, arg0, arg1, arg2); - /* If the first argument is "", return NULL_TREE. */ - const char *p1 = c_getstr (s1); - if (p1 && *p1 == '\0') - { - /* Evaluate and ignore argument s2 in case it has - side-effects. */ - return omit_one_operand_loc (loc, size_type_node, - size_zero_node, s2); + default: + break; } + return NULL_TREE; +} - /* If the second argument is "", return __builtin_strlen(s1). */ - const char *p2 = c_getstr (s2); - if (p2 && *p2 == '\0') - { - tree fn = builtin_decl_implicit (BUILT_IN_STRLEN); +/* Folds a call EXPR (which may be null) to built-in function FNDECL. + ARGS is an array of NARGS arguments. IGNORE is true if the result + of the function call is ignored. This function returns NULL_TREE + if no simplification was possible. */ - /* If the replacement _DECL isn't initialized, don't do the - transformation. 
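
fold_builtin_strcspn handles only the two degenerate cases: an empty first string gives 0, and an empty reject set reduces to strlen. As in:

    #include <assert.h>
    #include <string.h>

    int
    main (void)
    {
      assert (strcspn ("", "abc") == 0);
      assert (strcspn ("abc", "") == strlen ("abc"));
      return 0;
    }
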
*/ - if (!fn) - return NULL_TREE; +static tree +fold_builtin_n (location_t loc, tree expr, tree fndecl, tree *args, + int nargs, bool) +{ + tree ret = NULL_TREE; - return build_call_expr_loc (loc, fn, 1, s1); + switch (nargs) + { + case 0: + ret = fold_builtin_0 (loc, fndecl); + break; + case 1: + ret = fold_builtin_1 (loc, expr, fndecl, args[0]); + break; + case 2: + ret = fold_builtin_2 (loc, expr, fndecl, args[0], args[1]); + break; + case 3: + ret = fold_builtin_3 (loc, fndecl, args[0], args[1], args[2]); + break; + default: + ret = fold_builtin_varargs (loc, fndecl, args, nargs); + break; + } + if (ret) + { + ret = build1 (NOP_EXPR, TREE_TYPE (ret), ret); + SET_EXPR_LOCATION (ret, loc); + return ret; } return NULL_TREE; } -/* Fold the next_arg or va_start call EXP. Returns true if there was an error - produced. False otherwise. This is done so that we don't output the error - or warning twice or three times. */ +/* Construct a new CALL_EXPR to FNDECL using the tail of the argument + list ARGS along with N new arguments in NEWARGS. SKIP is the number + of arguments in ARGS to be omitted. OLDNARGS is the number of + elements in ARGS. */ -bool -fold_builtin_next_arg (tree exp, bool va_start_p) +static tree +rewrite_call_expr_valist (location_t loc, int oldnargs, tree *args, + int skip, tree fndecl, int n, va_list newargs) { - tree fntype = TREE_TYPE (current_function_decl); - int nargs = call_expr_nargs (exp); - tree arg; - /* There is good chance the current input_location points inside the - definition of the va_start macro (perhaps on the token for - builtin) in a system header, so warnings will not be emitted. - Use the location in real source code. */ - location_t current_location = - linemap_unwind_to_first_non_reserved_loc (line_table, input_location, - NULL); + int nargs = oldnargs - skip + n; + tree *buffer; - if (!stdarg_p (fntype)) + if (n > 0) { - error ("% used in function with fixed arguments"); - return true; - } + int i, j; - if (va_start_p) - { - if (va_start_p && (nargs != 2)) - { - error ("wrong number of arguments to function %"); - return true; - } - arg = CALL_EXPR_ARG (exp, 1); + buffer = XALLOCAVEC (tree, nargs); + for (i = 0; i < n; i++) + buffer[i] = va_arg (newargs, tree); + for (j = skip; j < oldnargs; j++, i++) + buffer[i] = args[j]; } - /* We use __builtin_va_start (ap, 0, 0) or __builtin_next_arg (0, 0) - when we checked the arguments and if needed issued a warning. */ else - { - if (nargs == 0) - { - /* Evidently an out of date version of ; can't validate - va_start's second argument, but can still work as intended. */ - warning_at (current_location, - OPT_Wvarargs, - "%<__builtin_next_arg%> called without an argument"); - return true; - } - else if (nargs > 1) - { - error ("wrong number of arguments to function %<__builtin_next_arg%>"); - return true; - } - arg = CALL_EXPR_ARG (exp, 0); - } + buffer = args + skip; - if (TREE_CODE (arg) == SSA_NAME - && SSA_NAME_VAR (arg)) - arg = SSA_NAME_VAR (arg); + return build_call_expr_loc_array (loc, fndecl, nargs, buffer); +} + +/* Return true if FNDECL shouldn't be folded right now. + If a built-in function has an inline attribute always_inline + wrapper, defer folding it after always_inline functions have + been inlined, otherwise e.g. -D_FORTIFY_SOURCE checking + might not be performed. 
*/ + +bool +avoid_folding_inline_builtin (tree fndecl) +{ + return (DECL_DECLARED_INLINE_P (fndecl) + && DECL_DISREGARD_INLINE_LIMITS (fndecl) + && cfun + && !cfun->always_inline_functions_inlined + && lookup_attribute ("always_inline", DECL_ATTRIBUTES (fndecl))); +} + +/* A wrapper function for builtin folding that prevents warnings for + "statement without effect" and the like, caused by removing the + call node earlier than the warning is generated. */ - /* We destructively modify the call to be __builtin_va_start (ap, 0) - or __builtin_next_arg (0) the first time we see it, after checking - the arguments and if needed issuing a warning. */ - if (!integer_zerop (arg)) +tree +fold_call_expr (location_t loc, tree exp, bool ignore) +{ + tree ret = NULL_TREE; + tree fndecl = get_callee_fndecl (exp); + if (fndecl && fndecl_built_in_p (fndecl) + /* If CALL_EXPR_VA_ARG_PACK is set, the arguments aren't finalized + yet. Defer folding until we see all the arguments + (after inlining). */ + && !CALL_EXPR_VA_ARG_PACK (exp)) { - tree last_parm = tree_last (DECL_ARGUMENTS (current_function_decl)); + int nargs = call_expr_nargs (exp); - /* Strip off all nops for the sake of the comparison. This - is not quite the same as STRIP_NOPS. It does more. - We must also strip off INDIRECT_EXPR for C++ reference - parameters. */ - while (CONVERT_EXPR_P (arg) - || TREE_CODE (arg) == INDIRECT_REF) - arg = TREE_OPERAND (arg, 0); - if (arg != last_parm) + /* Before gimplification CALL_EXPR_VA_ARG_PACK is not set, but + instead last argument is __builtin_va_arg_pack (). Defer folding + even in that case, until arguments are finalized. */ + if (nargs && TREE_CODE (CALL_EXPR_ARG (exp, nargs - 1)) == CALL_EXPR) { - /* FIXME: Sometimes with the tree optimizers we can get the - not the last argument even though the user used the last - argument. We just warn and set the arg to be the last - argument so that we will get wrong-code because of - it. */ - warning_at (current_location, - OPT_Wvarargs, - "second parameter of %<va_start%> not last named argument"); + tree fndecl2 = get_callee_fndecl (CALL_EXPR_ARG (exp, nargs - 1)); + if (fndecl2 && fndecl_built_in_p (fndecl2, BUILT_IN_VA_ARG_PACK)) + return NULL_TREE; } - /* Undefined by C99 7.15.1.4p4 (va_start): - "If the parameter parmN is declared with the register storage - class, with a function or array type, or with a type that is - not compatible with the type that results after application of - the default argument promotions, the behavior is undefined." - */ - else if (DECL_REGISTER (arg)) - { - warning_at (current_location, - OPT_Wvarargs, - "undefined behavior when second parameter of " - "%<va_start%> is declared with %<register%> storage"); - } + if (avoid_folding_inline_builtin (fndecl)) + return NULL_TREE; - /* We want to verify the second parameter just once before the tree - optimizers are run and then avoid keeping it in the tree, - as otherwise we could warn even for correct code like: - void foo (int i, ...) - { va_list ap; i++; va_start (ap, i); va_end (ap); } */ - if (va_start_p) - CALL_EXPR_ARG (exp, 1) = integer_zero_node; + if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD) + return targetm.fold_builtin (fndecl, call_expr_nargs (exp), + CALL_EXPR_ARGP (exp), ignore); else - CALL_EXPR_ARG (exp, 0) = integer_zero_node; + { + tree *args = CALL_EXPR_ARGP (exp); + ret = fold_builtin_n (loc, exp, fndecl, args, nargs, ignore); + if (ret) + return ret; + } } - return false; + return NULL_TREE; } +/* Fold a CALL_EXPR with type TYPE with FN as the function expression.
+ N arguments are passed in the array ARGARRAY. Return a folded + expression or NULL_TREE if no simplification was possible. */ -/* Expand a call EXP to __builtin_object_size. */ - -static rtx -expand_builtin_object_size (tree exp) +tree +fold_builtin_call_array (location_t loc, tree, + tree fn, + int n, + tree *argarray) { - tree ost; - int object_size_type; - tree fndecl = get_callee_fndecl (exp); + if (TREE_CODE (fn) != ADDR_EXPR) + return NULL_TREE; - if (!validate_arglist (exp, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) + tree fndecl = TREE_OPERAND (fn, 0); + if (TREE_CODE (fndecl) == FUNCTION_DECL + && fndecl_built_in_p (fndecl)) { - error ("first argument of %qD must be a pointer, second integer constant", - fndecl); - expand_builtin_trap (); - return const0_rtx; + /* If last argument is __builtin_va_arg_pack (), arguments to this + function are not finalized yet. Defer folding until they are. */ + if (n && TREE_CODE (argarray[n - 1]) == CALL_EXPR) + { + tree fndecl2 = get_callee_fndecl (argarray[n - 1]); + if (fndecl2 && fndecl_built_in_p (fndecl2, BUILT_IN_VA_ARG_PACK)) + return NULL_TREE; + } + if (avoid_folding_inline_builtin (fndecl)) + return NULL_TREE; + if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD) + return targetm.fold_builtin (fndecl, n, argarray, false); + else + return fold_builtin_n (loc, NULL_TREE, fndecl, argarray, n, false); } - ost = CALL_EXPR_ARG (exp, 1); - STRIP_NOPS (ost); + return NULL_TREE; +} - if (TREE_CODE (ost) != INTEGER_CST - || tree_int_cst_sgn (ost) < 0 - || compare_tree_int (ost, 3) > 0) - { - error ("last argument of %qD is not integer constant between 0 and 3", - fndecl); - expand_builtin_trap (); - return const0_rtx; - } +/* Construct a new CALL_EXPR using the tail of the argument list of EXP + along with N new arguments specified as the "..." parameters. SKIP + is the number of arguments in EXP to be omitted. This function is used + to do varargs-to-varargs transformations. */ - object_size_type = tree_to_shwi (ost); +static tree +rewrite_call_expr (location_t loc, tree exp, int skip, tree fndecl, int n, ...) +{ + va_list ap; + tree t; - return object_size_type < 2 ? constm1_rtx : const0_rtx; + va_start (ap, n); + t = rewrite_call_expr_valist (loc, call_expr_nargs (exp), + CALL_EXPR_ARGP (exp), skip, fndecl, n, ap); + va_end (ap); + + return t; } -/* Expand EXP, a call to the __mem{cpy,pcpy,move,set}_chk builtin. - FCODE is the BUILT_IN_* to use. - Return NULL_RTX if we failed; the caller should emit a normal call, - otherwise try to get the result in TARGET, if convenient (and in - mode MODE if that's convenient). */ +/* Validate a single argument ARG against a tree code CODE representing + a type. Return true when argument is valid. */ -static rtx -expand_builtin_memory_chk (tree exp, rtx target, machine_mode mode, - enum built_in_function fcode) +static bool +validate_arg (const_tree arg, enum tree_code code) { - if (!validate_arglist (exp, - POINTER_TYPE, - fcode == BUILT_IN_MEMSET_CHK - ? 
INTEGER_TYPE : POINTER_TYPE, - INTEGER_TYPE, INTEGER_TYPE, VOID_TYPE)) - return NULL_RTX; + if (!arg) + return false; + else if (code == POINTER_TYPE) + return POINTER_TYPE_P (TREE_TYPE (arg)); + else if (code == INTEGER_TYPE) + return INTEGRAL_TYPE_P (TREE_TYPE (arg)); + return code == TREE_CODE (TREE_TYPE (arg)); +} - tree dest = CALL_EXPR_ARG (exp, 0); - tree src = CALL_EXPR_ARG (exp, 1); - tree len = CALL_EXPR_ARG (exp, 2); - tree size = CALL_EXPR_ARG (exp, 3); +/* This function validates the types of a function call argument list + against a specified list of tree_codes. If the last specifier is a 0, + that represents an ellipses, otherwise the last specifier must be a + VOID_TYPE. - /* FIXME: Set access mode to write only for memset et al. */ - bool sizes_ok = check_access (exp, len, /*maxread=*/NULL_TREE, - /*srcstr=*/NULL_TREE, size, access_read_write); + This is the GIMPLE version of validate_arglist. Eventually we want to + completely convert builtins.c to work from GIMPLEs and the tree based + validate_arglist will then be removed. */ - if (!tree_fits_uhwi_p (size)) - return NULL_RTX; +bool +validate_gimple_arglist (const gcall *call, ...) +{ + enum tree_code code; + bool res = 0; + va_list ap; + const_tree arg; + size_t i; - if (tree_fits_uhwi_p (len) || integer_all_onesp (size)) - { - /* Avoid transforming the checking call to an ordinary one when - an overflow has been detected or when the call couldn't be - validated because the size is not constant. */ - if (!sizes_ok && !integer_all_onesp (size) && tree_int_cst_lt (size, len)) - return NULL_RTX; + va_start (ap, call); + i = 0; - tree fn = NULL_TREE; - /* If __builtin_mem{cpy,pcpy,move,set}_chk is used, assume - mem{cpy,pcpy,move,set} is available. */ - switch (fcode) + do + { + code = (enum tree_code) va_arg (ap, int); + switch (code) { - case BUILT_IN_MEMCPY_CHK: - fn = builtin_decl_explicit (BUILT_IN_MEMCPY); - break; - case BUILT_IN_MEMPCPY_CHK: - fn = builtin_decl_explicit (BUILT_IN_MEMPCPY); - break; - case BUILT_IN_MEMMOVE_CHK: - fn = builtin_decl_explicit (BUILT_IN_MEMMOVE); - break; - case BUILT_IN_MEMSET_CHK: - fn = builtin_decl_explicit (BUILT_IN_MEMSET); - break; + case 0: + /* This signifies an ellipses, any further arguments are all ok. */ + res = true; + goto end; + case VOID_TYPE: + /* This signifies an endlink, if no arguments remain, return + true, otherwise return false. */ + res = (i == gimple_call_num_args (call)); + goto end; default: + /* If no parameters remain or the parameter's code does not + match the specified code, return false. Otherwise continue + checking any remaining arguments. */ + arg = gimple_call_arg (call, i++); + if (!validate_arg (arg, code)) + goto end; break; } + } + while (1); - if (! fn) - return NULL_RTX; + /* We need gotos here since we can only have one VA_CLOSE in a + function. */ + end: ; + va_end (ap); - fn = build_call_nofold_loc (EXPR_LOCATION (exp), fn, 3, dest, src, len); - gcc_assert (TREE_CODE (fn) == CALL_EXPR); - CALL_EXPR_TAILCALL (fn) = CALL_EXPR_TAILCALL (exp); - return expand_expr (fn, target, mode, EXPAND_NORMAL); - } - else if (fcode == BUILT_IN_MEMSET_CHK) - return NULL_RTX; - else - { - unsigned int dest_align = get_pointer_alignment (dest); + return res; +} - /* If DEST is not a pointer type, call the normal function. */ - if (dest_align == 0) - return NULL_RTX; +/* Default target-specific builtin expander that does nothing. */ - /* If SRC and DEST are the same (and not volatile), do nothing. 
*/ - if (operand_equal_p (src, dest, 0)) - { - tree expr; +rtx +default_expand_builtin (tree exp ATTRIBUTE_UNUSED, + rtx target ATTRIBUTE_UNUSED, + rtx subtarget ATTRIBUTE_UNUSED, + machine_mode mode ATTRIBUTE_UNUSED, + int ignore ATTRIBUTE_UNUSED) +{ + return NULL_RTX; +} - if (fcode != BUILT_IN_MEMPCPY_CHK) - { - /* Evaluate and ignore LEN in case it has side-effects. */ - expand_expr (len, const0_rtx, VOIDmode, EXPAND_NORMAL); - return expand_expr (dest, target, mode, EXPAND_NORMAL); - } +/* Returns true is EXP represents data that would potentially reside + in a readonly section. */ - expr = fold_build_pointer_plus (dest, len); - return expand_expr (expr, target, mode, EXPAND_NORMAL); - } +bool +readonly_data_expr (tree exp) +{ + STRIP_NOPS (exp); - /* __memmove_chk special case. */ - if (fcode == BUILT_IN_MEMMOVE_CHK) - { - unsigned int src_align = get_pointer_alignment (src); + if (TREE_CODE (exp) != ADDR_EXPR) + return false; - if (src_align == 0) - return NULL_RTX; + exp = get_base_address (TREE_OPERAND (exp, 0)); + if (!exp) + return false; - /* If src is categorized for a readonly section we can use - normal __memcpy_chk. */ - if (readonly_data_expr (src)) - { - tree fn = builtin_decl_explicit (BUILT_IN_MEMCPY_CHK); - if (!fn) - return NULL_RTX; - fn = build_call_nofold_loc (EXPR_LOCATION (exp), fn, 4, - dest, src, len, size); - gcc_assert (TREE_CODE (fn) == CALL_EXPR); - CALL_EXPR_TAILCALL (fn) = CALL_EXPR_TAILCALL (exp); - return expand_expr (fn, target, mode, EXPAND_NORMAL); - } - } - return NULL_RTX; - } + /* Make sure we call decl_readonly_section only for trees it + can handle (since it returns true for everything it doesn't + understand). */ + if (TREE_CODE (exp) == STRING_CST + || TREE_CODE (exp) == CONSTRUCTOR + || (VAR_P (exp) && TREE_STATIC (exp))) + return decl_readonly_section (exp, 0); + else + return false; } -/* Emit warning if a buffer overflow is detected at compile time. */ +/* Simplify a call to the strpbrk builtin. S1 and S2 are the arguments + to the call, and TYPE is its return type. -static void -maybe_emit_chk_warning (tree exp, enum built_in_function fcode) -{ - /* The source string. */ - tree srcstr = NULL_TREE; - /* The size of the destination object returned by __builtin_object_size. */ - tree objsize = NULL_TREE; - /* The string that is being concatenated with (as in __strcat_chk) - or null if it isn't. */ - tree catstr = NULL_TREE; - /* The maximum length of the source sequence in a bounded operation - (such as __strncat_chk) or null if the operation isn't bounded - (such as __strcat_chk). */ - tree maxread = NULL_TREE; - /* The exact size of the access (such as in __strncpy_chk). */ - tree size = NULL_TREE; - /* The access by the function that's checked. Except for snprintf - both writing and reading is checked. */ - access_mode mode = access_read_write; + Return NULL_TREE if no simplification was possible, otherwise return the + simplified form of the call as a tree. - switch (fcode) - { - case BUILT_IN_STRCPY_CHK: - case BUILT_IN_STPCPY_CHK: - srcstr = CALL_EXPR_ARG (exp, 1); - objsize = CALL_EXPR_ARG (exp, 2); - break; + The simplified form may be a constant or other expression which + computes the same value, but in a more efficient manner (including + calls to other builtin functions). - case BUILT_IN_STRCAT_CHK: - /* For __strcat_chk the warning will be emitted only if overflowing - by at least strlen (dest) + 1 bytes. 
*/ - catstr = CALL_EXPR_ARG (exp, 0); - srcstr = CALL_EXPR_ARG (exp, 1); - objsize = CALL_EXPR_ARG (exp, 2); - break; + The call may contain arguments which need to be evaluated, but + which are not useful to determine the result of the call. In + this case we return a chain of COMPOUND_EXPRs. The LHS of each + COMPOUND_EXPR will be an argument which must be evaluated. + COMPOUND_EXPRs are chained through their RHS. The RHS of the last + COMPOUND_EXPR in the chain will contain the tree for the simplified + form of the builtin function call. */ - case BUILT_IN_STRNCAT_CHK: - catstr = CALL_EXPR_ARG (exp, 0); - srcstr = CALL_EXPR_ARG (exp, 1); - maxread = CALL_EXPR_ARG (exp, 2); - objsize = CALL_EXPR_ARG (exp, 3); - break; +static tree +fold_builtin_strpbrk (location_t loc, tree, tree s1, tree s2, tree type) +{ + if (!validate_arg (s1, POINTER_TYPE) + || !validate_arg (s2, POINTER_TYPE)) + return NULL_TREE; - case BUILT_IN_STRNCPY_CHK: - case BUILT_IN_STPNCPY_CHK: - srcstr = CALL_EXPR_ARG (exp, 1); - size = CALL_EXPR_ARG (exp, 2); - objsize = CALL_EXPR_ARG (exp, 3); - break; + tree fn; + const char *p1, *p2; - case BUILT_IN_SNPRINTF_CHK: - case BUILT_IN_VSNPRINTF_CHK: - maxread = CALL_EXPR_ARG (exp, 1); - objsize = CALL_EXPR_ARG (exp, 3); - /* The only checked access the write to the destination. */ - mode = access_write_only; - break; - default: - gcc_unreachable (); - } + p2 = c_getstr (s2); + if (p2 == NULL) + return NULL_TREE; - if (catstr && maxread) + p1 = c_getstr (s1); + if (p1 != NULL) { - /* Check __strncat_chk. There is no way to determine the length - of the string to which the source string is being appended so - just warn when the length of the source string is not known. */ - check_strncat_sizes (exp, objsize); - return; + const char *r = strpbrk (p1, p2); + tree tem; + + if (r == NULL) + return build_int_cst (TREE_TYPE (s1), 0); + + /* Return an offset into the constant string argument. */ + tem = fold_build_pointer_plus_hwi_loc (loc, s1, r - p1); + return fold_convert_loc (loc, type, tem); } - check_access (exp, size, maxread, srcstr, objsize, mode); -} + if (p2[0] == '\0') + /* strpbrk(x, "") == NULL. + Evaluate and ignore s1 in case it had side-effects. */ + return omit_one_operand_loc (loc, type, integer_zero_node, s1); -/* Emit warning if a buffer overflow is detected at compile time - in __sprintf_chk/__vsprintf_chk calls. */ + if (p2[1] != '\0') + return NULL_TREE; /* Really call strpbrk. */ -static void -maybe_emit_sprintf_chk_warning (tree exp, enum built_in_function fcode) -{ - tree size, len, fmt; - const char *fmt_str; - int nargs = call_expr_nargs (exp); + fn = builtin_decl_implicit (BUILT_IN_STRCHR); + if (!fn) + return NULL_TREE; - /* Verify the required arguments in the original call. */ + /* New argument list transforming strpbrk(s1, s2) to + strchr(s1, s2[0]). */ + return build_call_expr_loc (loc, fn, 2, s1, + build_int_cst (integer_type_node, p2[0])); +} - if (nargs < 4) - return; - size = CALL_EXPR_ARG (exp, 2); - fmt = CALL_EXPR_ARG (exp, 3); +/* Simplify a call to the strspn builtin. S1 and S2 are the arguments + to the call. - if (! tree_fits_uhwi_p (size) || integer_all_onesp (size)) - return; + Return NULL_TREE if no simplification was possible, otherwise return the + simplified form of the call as a tree. - /* Check whether the format is a literal string constant. 
*/ - fmt_str = c_getstr (fmt); - if (fmt_str == NULL) - return; + The simplified form may be a constant or other expression which + computes the same value, but in a more efficient manner (including + calls to other builtin functions). - if (!init_target_chars ()) - return; + The call may contain arguments which need to be evaluated, but + which are not useful to determine the result of the call. In + this case we return a chain of COMPOUND_EXPRs. The LHS of each + COMPOUND_EXPR will be an argument which must be evaluated. + COMPOUND_EXPRs are chained through their RHS. The RHS of the last + COMPOUND_EXPR in the chain will contain the tree for the simplified + form of the builtin function call. */ - /* If the format doesn't contain % args or %%, we know its size. */ - if (strchr (fmt_str, target_percent) == 0) - len = build_int_cstu (size_type_node, strlen (fmt_str)); - /* If the format is "%s" and first ... argument is a string literal, - we know it too. */ - else if (fcode == BUILT_IN_SPRINTF_CHK - && strcmp (fmt_str, target_percent_s) == 0) - { - tree arg; +static tree +fold_builtin_strspn (location_t loc, tree expr, tree s1, tree s2) +{ + if (!validate_arg (s1, POINTER_TYPE) + || !validate_arg (s2, POINTER_TYPE)) + return NULL_TREE; - if (nargs < 5) - return; - arg = CALL_EXPR_ARG (exp, 4); - if (! POINTER_TYPE_P (TREE_TYPE (arg))) - return; + if (!check_nul_terminated_array (expr, s1) + || !check_nul_terminated_array (expr, s2)) + return NULL_TREE; - len = c_strlen (arg, 1); - if (!len || ! tree_fits_uhwi_p (len)) - return; - } - else - return; + const char *p1 = c_getstr (s1), *p2 = c_getstr (s2); + + /* If either argument is "", return NULL_TREE. */ + if ((p1 && *p1 == '\0') || (p2 && *p2 == '\0')) + /* Evaluate and ignore both arguments in case either one has + side-effects. */ + return omit_two_operands_loc (loc, size_type_node, size_zero_node, + s1, s2); + return NULL_TREE; +} - /* Add one for the terminating nul. */ - len = fold_build2 (PLUS_EXPR, TREE_TYPE (len), len, size_one_node); +/* Simplify a call to the strcspn builtin. S1 and S2 are the arguments + to the call. - check_access (exp, /*size=*/NULL_TREE, /*maxread=*/NULL_TREE, len, size, - access_write_only); -} + Return NULL_TREE if no simplification was possible, otherwise return the + simplified form of the call as a tree. -/* Return true if STMT is a call to an allocation function. Unless - ALL_ALLOC is set, consider only functions that return dynmamically - allocated objects. Otherwise return true even for all forms of - alloca (including VLA). */ + The simplified form may be a constant or other expression which + computes the same value, but in a more efficient manner (including + calls to other builtin functions). -static bool -fndecl_alloc_p (tree fndecl, bool all_alloc) + The call may contain arguments which need to be evaluated, but + which are not useful to determine the result of the call. In + this case we return a chain of COMPOUND_EXPRs. The LHS of each + COMPOUND_EXPR will be an argument which must be evaluated. + COMPOUND_EXPRs are chained through their RHS. The RHS of the last + COMPOUND_EXPR in the chain will contain the tree for the simplified + form of the builtin function call. */ + +static tree +fold_builtin_strcspn (location_t loc, tree expr, tree s1, tree s2) { - if (!fndecl) - return false; + if (!validate_arg (s1, POINTER_TYPE) + || !validate_arg (s2, POINTER_TYPE)) + return NULL_TREE; - /* A call to operator new isn't recognized as one to a built-in. 
*/ - if (DECL_IS_OPERATOR_NEW_P (fndecl)) - return true; + if (!check_nul_terminated_array (expr, s1) + || !check_nul_terminated_array (expr, s2)) + return NULL_TREE; - if (fndecl_built_in_p (fndecl, BUILT_IN_NORMAL)) + /* If the first argument is "", return NULL_TREE. */ + const char *p1 = c_getstr (s1); + if (p1 && *p1 == '\0') { - switch (DECL_FUNCTION_CODE (fndecl)) - { - case BUILT_IN_ALLOCA: - case BUILT_IN_ALLOCA_WITH_ALIGN: - return all_alloc; - case BUILT_IN_ALIGNED_ALLOC: - case BUILT_IN_CALLOC: - case BUILT_IN_GOMP_ALLOC: - case BUILT_IN_MALLOC: - case BUILT_IN_REALLOC: - case BUILT_IN_STRDUP: - case BUILT_IN_STRNDUP: - return true; - default: - break; - } + /* Evaluate and ignore argument s2 in case it has + side-effects. */ + return omit_one_operand_loc (loc, size_type_node, + size_zero_node, s2); } - /* A function is considered an allocation function if it's declared - with attribute malloc with an argument naming its associated - deallocation function. */ - tree attrs = DECL_ATTRIBUTES (fndecl); - if (!attrs) - return false; - - for (tree allocs = attrs; - (allocs = lookup_attribute ("malloc", allocs)); - allocs = TREE_CHAIN (allocs)) + /* If the second argument is "", return __builtin_strlen(s1). */ + const char *p2 = c_getstr (s2); + if (p2 && *p2 == '\0') { - tree args = TREE_VALUE (allocs); - if (!args) - continue; - - if (TREE_VALUE (args)) - return true; - } - - return false; -} + tree fn = builtin_decl_implicit (BUILT_IN_STRLEN); -/* Return true if STMT is a call to an allocation function. A wrapper - around fndecl_alloc_p. */ + /* If the replacement _DECL isn't initialized, don't do the + transformation. */ + if (!fn) + return NULL_TREE; -static bool -gimple_call_alloc_p (gimple *stmt, bool all_alloc = false) -{ - return fndecl_alloc_p (gimple_call_fndecl (stmt), all_alloc); + return build_call_expr_loc (loc, fn, 1, s1); + } + return NULL_TREE; } -/* Return the zero-based number corresponding to the argument being - deallocated if STMT is a call to a deallocation function or UINT_MAX - if it isn't. */ +/* Fold the next_arg or va_start call EXP. Returns true if there was an error + produced. False otherwise. This is done so that we don't output the error + or warning twice or three times. */ -static unsigned -call_dealloc_argno (tree exp) +bool +fold_builtin_next_arg (tree exp, bool va_start_p) { - tree fndecl = get_callee_fndecl (exp); - if (!fndecl) - return UINT_MAX; - - return fndecl_dealloc_argno (fndecl); -} - -/* Return the zero-based number corresponding to the argument being - deallocated if FNDECL is a deallocation function or UINT_MAX - if it isn't. */ + tree fntype = TREE_TYPE (current_function_decl); + int nargs = call_expr_nargs (exp); + tree arg; + /* There is good chance the current input_location points inside the + definition of the va_start macro (perhaps on the token for + builtin) in a system header, so warnings will not be emitted. + Use the location in real source code. */ + location_t current_location = + linemap_unwind_to_first_non_reserved_loc (line_table, input_location, + NULL); -unsigned -fndecl_dealloc_argno (tree fndecl) -{ - /* A call to operator delete isn't recognized as one to a built-in. */ - if (DECL_IS_OPERATOR_DELETE_P (fndecl)) + if (!stdarg_p (fntype)) { - if (DECL_IS_REPLACEABLE_OPERATOR (fndecl)) - return 0; - - /* Avoid placement delete that's not been inlined. 
*/ - tree fname = DECL_ASSEMBLER_NAME (fndecl); - if (id_equal (fname, "_ZdlPvS_") // ordinary form - || id_equal (fname, "_ZdaPvS_")) // array form - return UINT_MAX; - return 0; + error ("%<va_start%> used in function with fixed arguments"); + return true; } - /* TODO: Handle user-defined functions with attribute malloc? Handle - known non-built-ins like fopen? */ - if (fndecl_built_in_p (fndecl, BUILT_IN_NORMAL)) + if (va_start_p) { - switch (DECL_FUNCTION_CODE (fndecl)) + if (va_start_p && (nargs != 2)) { - case BUILT_IN_FREE: - case BUILT_IN_REALLOC: - return 0; - default: - break; + error ("wrong number of arguments to function %<va_start%>"); + return true; } - return UINT_MAX; + arg = CALL_EXPR_ARG (exp, 1); } - - tree attrs = DECL_ATTRIBUTES (fndecl); - if (!attrs) - return UINT_MAX; - - for (tree atfree = attrs; - (atfree = lookup_attribute ("*dealloc", atfree)); - atfree = TREE_CHAIN (atfree)) + /* We use __builtin_va_start (ap, 0, 0) or __builtin_next_arg (0, 0) + when we checked the arguments and if needed issued a warning. */ + else { - tree alloc = TREE_VALUE (atfree); - if (!alloc) - continue; - - tree pos = TREE_CHAIN (alloc); - if (!pos) - return 0; - - pos = TREE_VALUE (pos); - return TREE_INT_CST_LOW (pos) - 1; + if (nargs == 0) + { + /* Evidently an out of date version of <stdarg.h>; can't validate + va_start's second argument, but can still work as intended. */ + warning_at (current_location, + OPT_Wvarargs, + "%<__builtin_next_arg%> called without an argument"); + return true; + } + else if (nargs > 1) + { + error ("wrong number of arguments to function %<__builtin_next_arg%>"); + return true; + } + arg = CALL_EXPR_ARG (exp, 0); } - return UINT_MAX; -} - -/* Return true if DELC doesn't refer to an operator delete that's - suitable to call with a pointer returned from the operator new - described by NEWC. */ - -static bool -new_delete_mismatch_p (const demangle_component &newc, - const demangle_component &delc) -{ - if (newc.type != delc.type) - return true; + if (TREE_CODE (arg) == SSA_NAME + && SSA_NAME_VAR (arg)) + arg = SSA_NAME_VAR (arg); - switch (newc.type) + /* We destructively modify the call to be __builtin_va_start (ap, 0) + or __builtin_next_arg (0) the first time we see it, after checking + the arguments and if needed issuing a warning. */ + if (!integer_zerop (arg)) { - case DEMANGLE_COMPONENT_NAME: - { - int len = newc.u.s_name.len; - const char *news = newc.u.s_name.s; - const char *dels = delc.u.s_name.s; - if (len != delc.u.s_name.len || memcmp (news, dels, len)) - return true; - - if (news[len] == 'n') - { - if (news[len + 1] == 'a') - return dels[len] != 'd' || dels[len + 1] != 'a'; - if (news[len + 1] == 'w') - return dels[len] != 'd' || dels[len + 1] != 'l'; - } - return false; - } + tree last_parm = tree_last (DECL_ARGUMENTS (current_function_decl)); - case DEMANGLE_COMPONENT_OPERATOR: - /* Operator mismatches are handled above.
*/ - return false; - - case DEMANGLE_COMPONENT_EXTENDED_OPERATOR: - if (newc.u.s_extended_operator.args != delc.u.s_extended_operator.args) - return true; - return new_delete_mismatch_p (*newc.u.s_extended_operator.name, - *delc.u.s_extended_operator.name); - - case DEMANGLE_COMPONENT_FIXED_TYPE: - if (newc.u.s_fixed.accum != delc.u.s_fixed.accum - || newc.u.s_fixed.sat != delc.u.s_fixed.sat) - return true; - return new_delete_mismatch_p (*newc.u.s_fixed.length, - *delc.u.s_fixed.length); + /* Strip off all nops for the sake of the comparison. This + is not quite the same as STRIP_NOPS. It does more. + We must also strip off INDIRECT_EXPR for C++ reference + parameters. */ + while (CONVERT_EXPR_P (arg) + || TREE_CODE (arg) == INDIRECT_REF) + arg = TREE_OPERAND (arg, 0); + if (arg != last_parm) - case DEMANGLE_COMPONENT_CTOR: - if (newc.u.s_ctor.kind != delc.u.s_ctor.kind) - return true; - return new_delete_mismatch_p (*newc.u.s_ctor.name, - *delc.u.s_ctor.name); + { + /* FIXME: Sometimes with the tree optimizers we can get the + not the last argument even though the user used the last + argument. We just warn and set the arg to be the last + argument so that we will get wrong-code because of + it. */ + warning_at (current_location, + OPT_Wvarargs, + "second parameter of %<va_start%> not last named argument"); + } - case DEMANGLE_COMPONENT_DTOR: - if (newc.u.s_dtor.kind != delc.u.s_dtor.kind) - return true; - return new_delete_mismatch_p (*newc.u.s_dtor.name, - *delc.u.s_dtor.name); + /* Undefined by C99 7.15.1.4p4 (va_start): + "If the parameter parmN is declared with the register storage + class, with a function or array type, or with a type that is + not compatible with the type that results after application of + the default argument promotions, the behavior is undefined." + */ + else if (DECL_REGISTER (arg)) + { + warning_at (current_location, + OPT_Wvarargs, + "undefined behavior when second parameter of " + "%<va_start%> is declared with %<register%> storage"); + } - case DEMANGLE_COMPONENT_BUILTIN_TYPE: - { - /* The demangler API provides no better way to compare built-in - types except to by comparing their demangled names. */ - size_t nsz, dsz; - demangle_component *pnc = const_cast<demangle_component *>(&newc); - demangle_component *pdc = const_cast<demangle_component *>(&delc); - char *nts = cplus_demangle_print (0, pnc, 16, &nsz); - char *dts = cplus_demangle_print (0, pdc, 16, &dsz); - if (!nts != !dts) - return true; - bool mismatch = strcmp (nts, dts); - free (nts); - free (dts); - return mismatch; - } + /* We want to verify the second parameter just once before the tree + optimizers are run and then avoid keeping it in the tree, + as otherwise we could warn even for correct code like: + void foo (int i, ...) + { va_list ap; i++; va_start (ap, i); va_end (ap); } */ + if (va_start_p) + CALL_EXPR_ARG (exp, 1) = integer_zero_node; + else + CALL_EXPR_ARG (exp, 0) = integer_zero_node; + } + return false; +} - case DEMANGLE_COMPONENT_SUB_STD: - if (newc.u.s_string.len != delc.u.s_string.len) - return true; - return memcmp (newc.u.s_string.string, delc.u.s_string.string, - newc.u.s_string.len); - case DEMANGLE_COMPONENT_FUNCTION_PARAM: - case DEMANGLE_COMPONENT_TEMPLATE_PARAM: - return newc.u.s_number.number != delc.u.s_number.number; +/* Expand a call EXP to __builtin_object_size.
*/ - case DEMANGLE_COMPONENT_CHARACTER: - return newc.u.s_character.character != delc.u.s_character.character; +static rtx +expand_builtin_object_size (tree exp) +{ + tree ost; + int object_size_type; + tree fndecl = get_callee_fndecl (exp); - case DEMANGLE_COMPONENT_DEFAULT_ARG: - case DEMANGLE_COMPONENT_LAMBDA: - if (newc.u.s_unary_num.num != delc.u.s_unary_num.num) - return true; - return new_delete_mismatch_p (*newc.u.s_unary_num.sub, - *delc.u.s_unary_num.sub); - default: - break; + if (!validate_arglist (exp, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) + { + error ("first argument of %qD must be a pointer, second integer constant", + fndecl); + expand_builtin_trap (); + return const0_rtx; } - if (!newc.u.s_binary.left != !delc.u.s_binary.left) - return true; + ost = CALL_EXPR_ARG (exp, 1); + STRIP_NOPS (ost); - if (!newc.u.s_binary.left) - return false; + if (TREE_CODE (ost) != INTEGER_CST + || tree_int_cst_sgn (ost) < 0 + || compare_tree_int (ost, 3) > 0) + { + error ("last argument of %qD is not integer constant between 0 and 3", + fndecl); + expand_builtin_trap (); + return const0_rtx; + } - if (new_delete_mismatch_p (*newc.u.s_binary.left, *delc.u.s_binary.left) - || !newc.u.s_binary.right != !delc.u.s_binary.right) - return true; + object_size_type = tree_to_shwi (ost); - if (newc.u.s_binary.right) - return new_delete_mismatch_p (*newc.u.s_binary.right, - *delc.u.s_binary.right); - return false; + return object_size_type < 2 ? constm1_rtx : const0_rtx; } -/* Return true if DELETE_DECL is an operator delete that's not suitable - to call with a pointer returned fron NEW_DECL. */ +/* Expand EXP, a call to the __mem{cpy,pcpy,move,set}_chk builtin. + FCODE is the BUILT_IN_* to use. + Return NULL_RTX if we failed; the caller should emit a normal call, + otherwise try to get the result in TARGET, if convenient (and in + mode MODE if that's convenient). */ -static bool -new_delete_mismatch_p (tree new_decl, tree delete_decl) +static rtx +expand_builtin_memory_chk (tree exp, rtx target, machine_mode mode, + enum built_in_function fcode) { - tree new_name = DECL_ASSEMBLER_NAME (new_decl); - tree delete_name = DECL_ASSEMBLER_NAME (delete_decl); - - /* valid_new_delete_pair_p() returns a conservative result (currently - it only handles global operators). A true result is reliable but - a false result doesn't necessarily mean the operators don't match. */ - if (valid_new_delete_pair_p (new_name, delete_name)) - return false; - - /* For anything not handled by valid_new_delete_pair_p() such as member - operators compare the individual demangled components of the mangled - name. */ - const char *new_str = IDENTIFIER_POINTER (new_name); - const char *del_str = IDENTIFIER_POINTER (delete_name); + if (!validate_arglist (exp, + POINTER_TYPE, + fcode == BUILT_IN_MEMSET_CHK + ? INTEGER_TYPE : POINTER_TYPE, + INTEGER_TYPE, INTEGER_TYPE, VOID_TYPE)) + return NULL_RTX; - void *np = NULL, *dp = NULL; - demangle_component *ndc = cplus_demangle_v3_components (new_str, 0, &np); - demangle_component *ddc = cplus_demangle_v3_components (del_str, 0, &dp); - bool mismatch = new_delete_mismatch_p (*ndc, *ddc); - free (np); - free (dp); - return mismatch; -} + tree dest = CALL_EXPR_ARG (exp, 0); + tree src = CALL_EXPR_ARG (exp, 1); + tree len = CALL_EXPR_ARG (exp, 2); + tree size = CALL_EXPR_ARG (exp, 3); -/* ALLOC_DECL and DEALLOC_DECL are pair of allocation and deallocation - functions. Return true if the latter is suitable to deallocate objects - allocated by calls to the former. 
*/ + /* FIXME: Set access mode to write only for memset et al. */ + bool sizes_ok = check_access (exp, len, /*maxread=*/NULL_TREE, + /*srcstr=*/NULL_TREE, size, access_read_write); -static bool -matching_alloc_calls_p (tree alloc_decl, tree dealloc_decl) -{ - /* Set to alloc_kind_t::builtin if ALLOC_DECL is associated with - a built-in deallocator. */ - enum class alloc_kind_t { none, builtin, user } - alloc_dealloc_kind = alloc_kind_t::none; + if (!tree_fits_uhwi_p (size)) + return NULL_RTX; - if (DECL_IS_OPERATOR_NEW_P (alloc_decl)) + if (tree_fits_uhwi_p (len) || integer_all_onesp (size)) { - if (DECL_IS_OPERATOR_DELETE_P (dealloc_decl)) - /* Return true iff both functions are of the same array or - singleton form and false otherwise. */ - return !new_delete_mismatch_p (alloc_decl, dealloc_decl); + /* Avoid transforming the checking call to an ordinary one when + an overflow has been detected or when the call couldn't be + validated because the size is not constant. */ + if (!sizes_ok && !integer_all_onesp (size) && tree_int_cst_lt (size, len)) + return NULL_RTX; - /* Return false for deallocation functions that are known not - to match. */ - if (fndecl_built_in_p (dealloc_decl, BUILT_IN_FREE) - || fndecl_built_in_p (dealloc_decl, BUILT_IN_REALLOC)) - return false; - /* Otherwise proceed below to check the deallocation function's - "*dealloc" attributes to look for one that mentions this operator - new. */ - } - else if (fndecl_built_in_p (alloc_decl, BUILT_IN_NORMAL)) - { - switch (DECL_FUNCTION_CODE (alloc_decl)) + tree fn = NULL_TREE; + /* If __builtin_mem{cpy,pcpy,move,set}_chk is used, assume + mem{cpy,pcpy,move,set} is available. */ + switch (fcode) { - case BUILT_IN_ALLOCA: - case BUILT_IN_ALLOCA_WITH_ALIGN: - return false; - - case BUILT_IN_ALIGNED_ALLOC: - case BUILT_IN_CALLOC: - case BUILT_IN_GOMP_ALLOC: - case BUILT_IN_MALLOC: - case BUILT_IN_REALLOC: - case BUILT_IN_STRDUP: - case BUILT_IN_STRNDUP: - if (DECL_IS_OPERATOR_DELETE_P (dealloc_decl)) - return false; - - if (fndecl_built_in_p (dealloc_decl, BUILT_IN_FREE) - || fndecl_built_in_p (dealloc_decl, BUILT_IN_REALLOC)) - return true; - - alloc_dealloc_kind = alloc_kind_t::builtin; + case BUILT_IN_MEMCPY_CHK: + fn = builtin_decl_explicit (BUILT_IN_MEMCPY); + break; + case BUILT_IN_MEMPCPY_CHK: + fn = builtin_decl_explicit (BUILT_IN_MEMPCPY); + break; + case BUILT_IN_MEMMOVE_CHK: + fn = builtin_decl_explicit (BUILT_IN_MEMMOVE); + break; + case BUILT_IN_MEMSET_CHK: + fn = builtin_decl_explicit (BUILT_IN_MEMSET); break; - default: break; } - } - - /* Set if DEALLOC_DECL both allocates and deallocates. */ - alloc_kind_t realloc_kind = alloc_kind_t::none; - - if (fndecl_built_in_p (dealloc_decl, BUILT_IN_NORMAL)) - { - built_in_function dealloc_code = DECL_FUNCTION_CODE (dealloc_decl); - if (dealloc_code == BUILT_IN_REALLOC) - realloc_kind = alloc_kind_t::builtin; - - for (tree amats = DECL_ATTRIBUTES (alloc_decl); - (amats = lookup_attribute ("malloc", amats)); - amats = TREE_CHAIN (amats)) - { - tree args = TREE_VALUE (amats); - if (!args) - continue; - tree fndecl = TREE_VALUE (args); - if (!fndecl || !DECL_P (fndecl)) - continue; + if (! 
fn) + return NULL_RTX; - if (fndecl_built_in_p (fndecl, BUILT_IN_NORMAL) - && dealloc_code == DECL_FUNCTION_CODE (fndecl)) - return true; - } + fn = build_call_nofold_loc (EXPR_LOCATION (exp), fn, 3, dest, src, len); + gcc_assert (TREE_CODE (fn) == CALL_EXPR); + CALL_EXPR_TAILCALL (fn) = CALL_EXPR_TAILCALL (exp); + return expand_expr (fn, target, mode, EXPAND_NORMAL); } - - const bool alloc_builtin = fndecl_built_in_p (alloc_decl, BUILT_IN_NORMAL); - alloc_kind_t realloc_dealloc_kind = alloc_kind_t::none; - - /* If DEALLOC_DECL has an internal "*dealloc" attribute scan the list - of its associated allocation functions for ALLOC_DECL. - If the corresponding ALLOC_DECL is found they're a matching pair, - otherwise they're not. - With DDATS set to the Deallocator's *Dealloc ATtributes... */ - for (tree ddats = DECL_ATTRIBUTES (dealloc_decl); - (ddats = lookup_attribute ("*dealloc", ddats)); - ddats = TREE_CHAIN (ddats)) + else if (fcode == BUILT_IN_MEMSET_CHK) + return NULL_RTX; + else { - tree args = TREE_VALUE (ddats); - if (!args) - continue; - - tree alloc = TREE_VALUE (args); - if (!alloc) - continue; + unsigned int dest_align = get_pointer_alignment (dest); - if (alloc == DECL_NAME (dealloc_decl)) - realloc_kind = alloc_kind_t::user; + /* If DEST is not a pointer type, call the normal function. */ + if (dest_align == 0) + return NULL_RTX; - if (DECL_P (alloc)) + /* If SRC and DEST are the same (and not volatile), do nothing. */ + if (operand_equal_p (src, dest, 0)) { - gcc_checking_assert (fndecl_built_in_p (alloc, BUILT_IN_NORMAL)); + tree expr; - switch (DECL_FUNCTION_CODE (alloc)) + if (fcode != BUILT_IN_MEMPCPY_CHK) { - case BUILT_IN_ALIGNED_ALLOC: - case BUILT_IN_CALLOC: - case BUILT_IN_GOMP_ALLOC: - case BUILT_IN_MALLOC: - case BUILT_IN_REALLOC: - case BUILT_IN_STRDUP: - case BUILT_IN_STRNDUP: - realloc_dealloc_kind = alloc_kind_t::builtin; - break; - default: - break; + /* Evaluate and ignore LEN in case it has side-effects. */ + expand_expr (len, const0_rtx, VOIDmode, EXPAND_NORMAL); + return expand_expr (dest, target, mode, EXPAND_NORMAL); } - if (!alloc_builtin) - continue; - - if (DECL_FUNCTION_CODE (alloc) != DECL_FUNCTION_CODE (alloc_decl)) - continue; - - return true; + expr = fold_build_pointer_plus (dest, len); + return expand_expr (expr, target, mode, EXPAND_NORMAL); } - if (alloc == DECL_NAME (alloc_decl)) - return true; - } - - if (realloc_kind == alloc_kind_t::none) - return false; + /* __memmove_chk special case. */ + if (fcode == BUILT_IN_MEMMOVE_CHK) + { + unsigned int src_align = get_pointer_alignment (src); - hash_set<tree> common_deallocs; - /* Special handling for deallocators. Iterate over both the allocator's - and the reallocator's associated deallocator functions looking for - the first one in common. If one is found, the de/reallocator is - a match for the allocator even though the latter isn't directly - associated with the former. This simplifies declarations in system - headers. - With AMATS set to the Allocator's Malloc ATtributes, - and RMATS set to Reallocator's Malloc ATtributes... */ - for (tree amats = DECL_ATTRIBUTES (alloc_decl), - rmats = DECL_ATTRIBUTES (dealloc_decl); - (amats = lookup_attribute ("malloc", amats)) - || (rmats = lookup_attribute ("malloc", rmats)); - amats = amats ? TREE_CHAIN (amats) : NULL_TREE, - rmats = rmats ? TREE_CHAIN (rmats) : NULL_TREE) - { - if (tree args = amats ?
TREE_VALUE (amats) : NULL_TREE) - if (tree adealloc = TREE_VALUE (args)) - { - if (DECL_P (adealloc) - && fndecl_built_in_p (adealloc, BUILT_IN_NORMAL)) - { - built_in_function fncode = DECL_FUNCTION_CODE (adealloc); - if (fncode == BUILT_IN_FREE || fncode == BUILT_IN_REALLOC) - { - if (realloc_kind == alloc_kind_t::builtin) - return true; - alloc_dealloc_kind = alloc_kind_t::builtin; - } - continue; - } - - common_deallocs.add (adealloc); - } + if (src_align == 0) + return NULL_RTX; - if (tree args = rmats ? TREE_VALUE (rmats) : NULL_TREE) - if (tree ddealloc = TREE_VALUE (args)) - { - if (DECL_P (ddealloc) - && fndecl_built_in_p (ddealloc, BUILT_IN_NORMAL)) - { - built_in_function fncode = DECL_FUNCTION_CODE (ddealloc); - if (fncode == BUILT_IN_FREE || fncode == BUILT_IN_REALLOC) - { - if (alloc_dealloc_kind == alloc_kind_t::builtin) - return true; - realloc_dealloc_kind = alloc_kind_t::builtin; - } - continue; - } - - if (common_deallocs.add (ddealloc)) - return true; - } + /* If src is categorized for a readonly section we can use + normal __memcpy_chk. */ + if (readonly_data_expr (src)) + { + tree fn = builtin_decl_explicit (BUILT_IN_MEMCPY_CHK); + if (!fn) + return NULL_RTX; + fn = build_call_nofold_loc (EXPR_LOCATION (exp), fn, 4, + dest, src, len, size); + gcc_assert (TREE_CODE (fn) == CALL_EXPR); + CALL_EXPR_TAILCALL (fn) = CALL_EXPR_TAILCALL (exp); + return expand_expr (fn, target, mode, EXPAND_NORMAL); + } + } + return NULL_RTX; } - - /* Succeed only if ALLOC_DECL and the reallocator DEALLOC_DECL share - a built-in deallocator. */ - return (alloc_dealloc_kind == alloc_kind_t::builtin - && realloc_dealloc_kind == alloc_kind_t::builtin); } -/* Return true if DEALLOC_DECL is a function suitable to deallocate - objectes allocated by the ALLOC call. */ +/* Emit warning if a buffer overflow is detected at compile time. */ -static bool -matching_alloc_calls_p (gimple *alloc, tree dealloc_decl) +static void +maybe_emit_chk_warning (tree exp, enum built_in_function fcode) { - tree alloc_decl = gimple_call_fndecl (alloc); - if (!alloc_decl) - return true; - - return matching_alloc_calls_p (alloc_decl, dealloc_decl); -} + /* The source string. */ + tree srcstr = NULL_TREE; + /* The size of the destination object returned by __builtin_object_size. */ + tree objsize = NULL_TREE; + /* The string that is being concatenated with (as in __strcat_chk) + or null if it isn't. */ + tree catstr = NULL_TREE; + /* The maximum length of the source sequence in a bounded operation + (such as __strncat_chk) or null if the operation isn't bounded + (such as __strcat_chk). */ + tree maxread = NULL_TREE; + /* The exact size of the access (such as in __strncpy_chk). */ + tree size = NULL_TREE; + /* The access by the function that's checked. Except for snprintf + both writing and reading is checked. */ + access_mode mode = access_read_write; -/* Diagnose a call EXP to deallocate a pointer referenced by AREF if it - includes a nonzero offset. Such a pointer cannot refer to the beginning - of an allocated object. A negative offset may refer to it only if - the target pointer is unknown. 
*/ + switch (fcode) + { + case BUILT_IN_STRCPY_CHK: + case BUILT_IN_STPCPY_CHK: + srcstr = CALL_EXPR_ARG (exp, 1); + objsize = CALL_EXPR_ARG (exp, 2); + break; -static bool -warn_dealloc_offset (location_t loc, tree exp, const access_ref &aref) -{ - if (aref.deref || aref.offrng[0] <= 0 || aref.offrng[1] <= 0) - return false; + case BUILT_IN_STRCAT_CHK: + /* For __strcat_chk the warning will be emitted only if overflowing + by at least strlen (dest) + 1 bytes. */ + catstr = CALL_EXPR_ARG (exp, 0); + srcstr = CALL_EXPR_ARG (exp, 1); + objsize = CALL_EXPR_ARG (exp, 2); + break; - tree dealloc_decl = get_callee_fndecl (exp); - if (!dealloc_decl) - return false; + case BUILT_IN_STRNCAT_CHK: + catstr = CALL_EXPR_ARG (exp, 0); + srcstr = CALL_EXPR_ARG (exp, 1); + maxread = CALL_EXPR_ARG (exp, 2); + objsize = CALL_EXPR_ARG (exp, 3); + break; - if (DECL_IS_OPERATOR_DELETE_P (dealloc_decl) - && !DECL_IS_REPLACEABLE_OPERATOR (dealloc_decl)) - { - /* A call to a user-defined operator delete with a pointer plus offset - may be valid if it's returned from an unknown function (i.e., one - that's not operator new). */ - if (TREE_CODE (aref.ref) == SSA_NAME) - { - gimple *def_stmt = SSA_NAME_DEF_STMT (aref.ref); - if (is_gimple_call (def_stmt)) - { - tree alloc_decl = gimple_call_fndecl (def_stmt); - if (!alloc_decl || !DECL_IS_OPERATOR_NEW_P (alloc_decl)) - return false; - } - } - } + case BUILT_IN_STRNCPY_CHK: + case BUILT_IN_STPNCPY_CHK: + srcstr = CALL_EXPR_ARG (exp, 1); + size = CALL_EXPR_ARG (exp, 2); + objsize = CALL_EXPR_ARG (exp, 3); + break; - char offstr[80]; - offstr[0] = '\0'; - if (wi::fits_shwi_p (aref.offrng[0])) - { - if (aref.offrng[0] == aref.offrng[1] - || !wi::fits_shwi_p (aref.offrng[1])) - sprintf (offstr, " %lli", - (long long)aref.offrng[0].to_shwi ()); - else - sprintf (offstr, " [%lli, %lli]", - (long long)aref.offrng[0].to_shwi (), - (long long)aref.offrng[1].to_shwi ()); + case BUILT_IN_SNPRINTF_CHK: + case BUILT_IN_VSNPRINTF_CHK: + maxread = CALL_EXPR_ARG (exp, 1); + objsize = CALL_EXPR_ARG (exp, 3); + /* The only checked access the write to the destination. */ + mode = access_write_only; + break; + default: + gcc_unreachable (); } - if (!warning_at (loc, OPT_Wfree_nonheap_object, - "%qD called on pointer %qE with nonzero offset%s", - dealloc_decl, aref.ref, offstr)) - return false; - - if (DECL_P (aref.ref)) - inform (DECL_SOURCE_LOCATION (aref.ref), "declared here"); - else if (TREE_CODE (aref.ref) == SSA_NAME) + if (catstr && maxread) { - gimple *def_stmt = SSA_NAME_DEF_STMT (aref.ref); - if (is_gimple_call (def_stmt)) - { - location_t def_loc = gimple_location (def_stmt); - tree alloc_decl = gimple_call_fndecl (def_stmt); - if (alloc_decl) - inform (def_loc, - "returned from %qD", alloc_decl); - else if (tree alloc_fntype = gimple_call_fntype (def_stmt)) - inform (def_loc, - "returned from %qT", alloc_fntype); - else - inform (def_loc, "obtained here"); - } + /* Check __strncat_chk. There is no way to determine the length + of the string to which the source string is being appended so + just warn when the length of the source string is not known. */ + check_strncat_sizes (exp, objsize); + return; } - return true; + check_access (exp, size, maxread, srcstr, objsize, mode); } -/* Issue a warning if a deallocation function such as free, realloc, - or C++ operator delete is called with an argument not returned by - a matching allocation function such as malloc or the corresponding - form of C++ operatorn new. 
*/ +/* Emit warning if a buffer overflow is detected at compile time + in __sprintf_chk/__vsprintf_chk calls. */ -void -maybe_emit_free_warning (tree exp) +static void +maybe_emit_sprintf_chk_warning (tree exp, enum built_in_function fcode) { - tree fndecl = get_callee_fndecl (exp); - if (!fndecl) - return; + tree size, len, fmt; + const char *fmt_str; + int nargs = call_expr_nargs (exp); - unsigned argno = call_dealloc_argno (exp); - if ((unsigned) call_expr_nargs (exp) <= argno) - return; + /* Verify the required arguments in the original call. */ - tree ptr = CALL_EXPR_ARG (exp, argno); - if (integer_zerop (ptr)) + if (nargs < 4) return; + size = CALL_EXPR_ARG (exp, 2); + fmt = CALL_EXPR_ARG (exp, 3); - access_ref aref; - if (!compute_objsize (ptr, 0, &aref)) + if (! tree_fits_uhwi_p (size) || integer_all_onesp (size)) return; - tree ref = aref.ref; - if (integer_zerop (ref)) + /* Check whether the format is a literal string constant. */ + fmt_str = c_getstr (fmt); + if (fmt_str == NULL) return; - tree dealloc_decl = get_callee_fndecl (exp); - location_t loc = EXPR_LOCATION (exp); + if (!init_target_chars ()) + return; - if (DECL_P (ref) || EXPR_P (ref)) + /* If the format doesn't contain % args or %%, we know its size. */ + if (strchr (fmt_str, target_percent) == 0) + len = build_int_cstu (size_type_node, strlen (fmt_str)); + /* If the format is "%s" and first ... argument is a string literal, + we know it too. */ + else if (fcode == BUILT_IN_SPRINTF_CHK + && strcmp (fmt_str, target_percent_s) == 0) { - /* Diagnose freeing a declared object. */ - if (aref.ref_declared () - && warning_at (loc, OPT_Wfree_nonheap_object, - "%qD called on unallocated object %qD", - dealloc_decl, ref)) - { - loc = (DECL_P (ref) - ? DECL_SOURCE_LOCATION (ref) - : EXPR_LOCATION (ref)); - inform (loc, "declared here"); - return; - } + tree arg; - /* Diagnose freeing a pointer that includes a positive offset. - Such a pointer cannot refer to the beginning of an allocated - object. A negative offset may refer to it. */ - if (aref.sizrng[0] != aref.sizrng[1] - && warn_dealloc_offset (loc, exp, aref)) + if (nargs < 5) + return; + arg = CALL_EXPR_ARG (exp, 4); + if (! POINTER_TYPE_P (TREE_TYPE (arg))) + return; + + len = c_strlen (arg, 1); + if (!len || ! tree_fits_uhwi_p (len)) return; } - else if (CONSTANT_CLASS_P (ref)) - { - if (warning_at (loc, OPT_Wfree_nonheap_object, - "%qD called on a pointer to an unallocated " - "object %qE", dealloc_decl, ref)) - { - if (TREE_CODE (ptr) == SSA_NAME) - { - gimple *def_stmt = SSA_NAME_DEF_STMT (ptr); - if (is_gimple_assign (def_stmt)) - { - location_t loc = gimple_location (def_stmt); - inform (loc, "assigned here"); - } - } - return; - } - } - else if (TREE_CODE (ref) == SSA_NAME) - { - /* Also warn if the pointer argument refers to the result - of an allocation call like alloca or VLA. */ - gimple *def_stmt = SSA_NAME_DEF_STMT (ref); - if (is_gimple_call (def_stmt)) - { - bool warned = false; - if (gimple_call_alloc_p (def_stmt)) - { - if (matching_alloc_calls_p (def_stmt, dealloc_decl)) - { - if (warn_dealloc_offset (loc, exp, aref)) - return; - } - else - { - tree alloc_decl = gimple_call_fndecl (def_stmt); - const opt_code opt = - (DECL_IS_OPERATOR_NEW_P (alloc_decl) - || DECL_IS_OPERATOR_DELETE_P (dealloc_decl) - ? 
OPT_Wmismatched_new_delete - : OPT_Wmismatched_dealloc); - warned = warning_at (loc, opt, - "%qD called on pointer returned " - "from a mismatched allocation " - "function", dealloc_decl); - } - } - else if (gimple_call_builtin_p (def_stmt, BUILT_IN_ALLOCA) - || gimple_call_builtin_p (def_stmt, - BUILT_IN_ALLOCA_WITH_ALIGN)) - warned = warning_at (loc, OPT_Wfree_nonheap_object, - "%qD called on pointer to " - "an unallocated object", - dealloc_decl); - else if (warn_dealloc_offset (loc, exp, aref)) - return; - - if (warned) - { - tree fndecl = gimple_call_fndecl (def_stmt); - inform (gimple_location (def_stmt), - "returned from %qD", fndecl); - return; - } - } - else if (gimple_nop_p (def_stmt)) - { - ref = SSA_NAME_VAR (ref); - /* Diagnose freeing a pointer that includes a positive offset. */ - if (TREE_CODE (ref) == PARM_DECL - && !aref.deref - && aref.sizrng[0] != aref.sizrng[1] - && aref.offrng[0] > 0 && aref.offrng[1] > 0 - && warn_dealloc_offset (loc, exp, aref)) - return; - } - } + else + return; + + /* Add one for the terminating nul. */ + len = fold_build2 (PLUS_EXPR, TREE_TYPE (len), len, size_one_node); + + check_access (exp, /*size=*/NULL_TREE, /*maxread=*/NULL_TREE, len, size, + access_write_only); } /* Fold a call to __builtin_object_size with arguments PTR and OST, -- cgit v1.1 From e5e164effa30fd2b5c5bc3e6883d63889e96d8da Mon Sep 17 00:00:00 2001 From: "H.J. Lu" Date: Sun, 6 Mar 2016 06:38:21 -0800 Subject: Add QI vector mode support to by-pieces for memset 1. Replace scalar_int_mode with fixed_size_mode in the by-pieces infrastructure to allow non-integer mode. 2. Rename widest_int_mode_for_size to widest_fixed_size_mode_for_size to return QI vector mode for memset. 3. Add op_by_pieces_d::smallest_fixed_size_mode_for_size to return the smallest integer or QI vector mode. 4. Remove clear_by_pieces_1 and use builtin_memset_read_str in clear_by_pieces to support vector mode broadcast. 5. Add lowpart_subreg_regno, a wrapper around simplify_subreg_regno that uses subreg_lowpart_offset (mode, prev_mode) as the offset. 6. Add TARGET_GEN_MEMSET_SCRATCH_RTX to allow the backend to use a hard scratch register to avoid stack realignment when expanding memset. gcc/ PR middle-end/90773 * builtins.c (builtin_memcpy_read_str): Change the mode argument from scalar_int_mode to fixed_size_mode. (builtin_strncpy_read_str): Likewise. (gen_memset_value_from_prev): New function. (builtin_memset_read_str): Change the mode argument from scalar_int_mode to fixed_size_mode. Use gen_memset_value_from_prev and support CONST_VECTOR. (builtin_memset_gen_str): Likewise. (try_store_by_multiple_pieces): Use by_pieces_constfn to declare constfun. * builtins.h (builtin_strncpy_read_str): Replace scalar_int_mode with fixed_size_mode. (builtin_memset_read_str): Likewise. * expr.c (widest_int_mode_for_size): Renamed to ... (widest_fixed_size_mode_for_size): Add a bool argument to indicate if QI vector mode can be used. (by_pieces_ninsns): Call widest_fixed_size_mode_for_size instead of widest_int_mode_for_size. (pieces_addr::adjust): Change the mode argument from scalar_int_mode to fixed_size_mode. (op_by_pieces_d): Make m_len read-only. Add a bool member, m_qi_vector_mode, to indicate that QI vector mode can be used. (op_by_pieces_d::op_by_pieces_d): Add a bool argument to initialize m_qi_vector_mode. Call widest_fixed_size_mode_for_size instead of widest_int_mode_for_size. (op_by_pieces_d::get_usable_mode): Change the mode argument from scalar_int_mode to fixed_size_mode. 
Call widest_fixed_size_mode_for_size instead of widest_int_mode_for_size. (op_by_pieces_d::smallest_fixed_size_mode_for_size): New member function to return the smallest integer or QI vector mode. (op_by_pieces_d::run): Call widest_fixed_size_mode_for_size instead of widest_int_mode_for_size. Call smallest_fixed_size_mode_for_size instead of smallest_int_mode_for_size. (store_by_pieces_d::store_by_pieces_d): Add a bool argument to indicate that QI vector mode can be used and pass it to op_by_pieces_d::op_by_pieces_d. (can_store_by_pieces): Call widest_fixed_size_mode_for_size instead of widest_int_mode_for_size. Pass memsetp to widest_fixed_size_mode_for_size to support QI vector mode. Allow all CONST_VECTORs for memset if vec_duplicate is supported. (store_by_pieces): Pass memsetp to store_by_pieces_d::store_by_pieces_d. (clear_by_pieces_1): Removed. (clear_by_pieces): Replace clear_by_pieces_1 with builtin_memset_read_str and pass true to store_by_pieces_d to support vector mode broadcast. (string_cst_read_str): Change the mode argument from scalar_int_mode to fixed_size_mode. * expr.h (by_pieces_constfn): Change scalar_int_mode to fixed_size_mode. (by_pieces_prev): Likewise. * rtl.h (lowpart_subreg_regno): New. * rtlanal.c (lowpart_subreg_regno): New. A wrapper around simplify_subreg_regno. * target.def (gen_memset_scratch_rtx): New hook. * doc/tm.texi.in: Add TARGET_GEN_MEMSET_SCRATCH_RTX. * doc/tm.texi: Regenerated. gcc/testsuite/ * gcc.target/i386/pr100865-3.c: Expect vmovdqu8 instead of vmovdqu. * gcc.target/i386/pr100865-4b.c: Likewise. --- gcc/builtins.c | 171 ++++++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 140 insertions(+), 31 deletions(-) (limited to 'gcc/builtins.c') diff --git a/gcc/builtins.c b/gcc/builtins.c index 845a8bb..2387b5d 100644 --- a/gcc/builtins.c +++ b/gcc/builtins.c @@ -3119,13 +3119,16 @@ expand_builtin_strnlen (tree exp, rtx target, machine_mode target_mode) static rtx builtin_memcpy_read_str (void *data, void *, HOST_WIDE_INT offset, - scalar_int_mode mode) + fixed_size_mode mode) { /* The REPresentation pointed to by DATA need not be a nul-terminated string but the caller guarantees it's large enough for MODE. */ const char *rep = (const char *) data; - return c_readstr (rep + offset, mode, /*nul_terminated=*/false); + /* The by-pieces infrastructure does not try to pick a vector mode + for memcpy expansion. */ + return c_readstr (rep + offset, as_a <scalar_int_mode> (mode), + /*nul_terminated=*/false); } /* LEN specify length of the block of memcpy/memset operation. @@ -3742,14 +3745,16 @@ expand_builtin_stpncpy (tree exp, rtx) rtx builtin_strncpy_read_str (void *data, void *, HOST_WIDE_INT offset, - scalar_int_mode mode) + fixed_size_mode mode) { const char *str = (const char *) data; if ((unsigned HOST_WIDE_INT) offset > strlen (str)) return const0_rtx; - return c_readstr (str + offset, mode); + /* The by-pieces infrastructure does not try to pick a vector mode + for strncpy expansion. */ + return c_readstr (str + offset, as_a <scalar_int_mode> (mode)); } /* Helper to check the sizes of sequences and the destination of calls @@ -3950,30 +3955,122 @@ expand_builtin_strncpy (tree exp, rtx target) return NULL_RTX; } -/* Callback routine for store_by_pieces. Read GET_MODE_BITSIZE (MODE) - bytes from constant string DATA + OFFSET and return it as target - constant. If PREV isn't nullptr, it has the RTL info from the +/* Return the RTL of a register in MODE generated from PREV in the previous iteration.
*/ -rtx -builtin_memset_read_str (void *data, void *prevp, - HOST_WIDE_INT offset ATTRIBUTE_UNUSED, - scalar_int_mode mode) +static rtx +gen_memset_value_from_prev (by_pieces_prev *prev, fixed_size_mode mode) { - by_pieces_prev *prev = (by_pieces_prev *) prevp; + rtx target = nullptr; if (prev != nullptr && prev->data != nullptr) { /* Use the previous data in the same mode. */ if (prev->mode == mode) return prev->data; + + fixed_size_mode prev_mode = prev->mode; + + /* Don't use the previous data to write QImode if it is in a + vector mode. */ + if (VECTOR_MODE_P (prev_mode) && mode == QImode) + return target; + + rtx prev_rtx = prev->data; + + if (REG_P (prev_rtx) + && HARD_REGISTER_P (prev_rtx) + && lowpart_subreg_regno (REGNO (prev_rtx), prev_mode, mode) < 0) + { + /* This case occurs when PREV_MODE is a vector and when + MODE is too small to store using vector operations. + After register allocation, the code will need to move the + lowpart of the vector register into a non-vector register. + + Also, the target has chosen to use a hard register + instead of going with the default choice of using a + pseudo register. We should respect that choice and try to + avoid creating a pseudo register with the same mode as the + current hard register. + + In principle, we could just use a lowpart MODE subreg of + the vector register. However, the vector register mode might + be too wide for non-vector registers, and we already know + that the non-vector mode is too small for vector registers. + It's therefore likely that we'd need to spill to memory in + the vector mode and reload the non-vector value from there. + + Try to avoid that by reducing the vector register to the + smallest size that it can hold. This should increase the + chances that non-vector registers can hold both the inner + and outer modes of the subreg that we generate later. */ + machine_mode m; + fixed_size_mode candidate; + FOR_EACH_MODE_IN_CLASS (m, GET_MODE_CLASS (mode)) + if (is_a <fixed_size_mode> (m, &candidate)) + { + if (GET_MODE_SIZE (candidate) + >= GET_MODE_SIZE (prev_mode)) + break; + if (GET_MODE_SIZE (candidate) >= GET_MODE_SIZE (mode) + && lowpart_subreg_regno (REGNO (prev_rtx), + prev_mode, candidate) >= 0) + { + target = lowpart_subreg (candidate, prev_rtx, + prev_mode); + prev_rtx = target; + prev_mode = candidate; + break; + } + } + if (target == nullptr) + prev_rtx = copy_to_reg (prev_rtx); + } + + target = lowpart_subreg (mode, prev_rtx, prev_mode); } + return target; +} +/* Callback routine for store_by_pieces. Read GET_MODE_BITSIZE (MODE) + bytes from constant string DATA + OFFSET and return it as target + constant. If PREV isn't nullptr, it has the RTL info from the + previous iteration. */ + +rtx +builtin_memset_read_str (void *data, void *prev, + HOST_WIDE_INT offset ATTRIBUTE_UNUSED, + fixed_size_mode mode) +{ const char *c = (const char *) data; - char *p = XALLOCAVEC (char, GET_MODE_SIZE (mode)); + unsigned int size = GET_MODE_SIZE (mode); + + rtx target = gen_memset_value_from_prev ((by_pieces_prev *) prev, + mode); + if (target != nullptr) + return target; + rtx src = gen_int_mode (*c, QImode); - memset (p, *c, GET_MODE_SIZE (mode)); + if (VECTOR_MODE_P (mode)) + { + gcc_assert (GET_MODE_INNER (mode) == QImode); + + rtx const_vec = gen_const_vec_duplicate (mode, src); + if (prev == NULL) + /* Return CONST_VECTOR when called by a query function. */ + return const_vec; + + /* Use the move expander with CONST_VECTOR.
+      target = targetm.gen_memset_scratch_rtx (mode);
+      emit_move_insn (target, const_vec);
+      return target;
+    }
+
+  char *p = XALLOCAVEC (char, size);
+
+  memset (p, *c, size);
 
-  return c_readstr (p, mode);
+  /* Vector modes should be handled above.  */
+  return c_readstr (p, as_a <scalar_int_mode> (mode));
 }
 
 /* Callback routine for store_by_pieces.  Return the RTL of a register
@@ -3983,33 +4080,45 @@ builtin_memset_read_str (void *data, void *prevp,
    nullptr, it has the RTL info from the previous iteration.  */
 
 static rtx
-builtin_memset_gen_str (void *data, void *prevp,
+builtin_memset_gen_str (void *data, void *prev,
 			HOST_WIDE_INT offset ATTRIBUTE_UNUSED,
-			scalar_int_mode mode)
+			fixed_size_mode mode)
 {
   rtx target, coeff;
   size_t size;
   char *p;
 
-  by_pieces_prev *prev = (by_pieces_prev *) prevp;
-  if (prev != nullptr && prev->data != nullptr)
-    {
-      /* Use the previous data in the same mode.  */
-      if (prev->mode == mode)
-	return prev->data;
-
-      target = simplify_gen_subreg (mode, prev->data, prev->mode, 0);
-      if (target != nullptr)
-	return target;
-    }
-
   size = GET_MODE_SIZE (mode);
   if (size == 1)
     return (rtx) data;
 
+  target = gen_memset_value_from_prev ((by_pieces_prev *) prev, mode);
+  if (target != nullptr)
+    return target;
+
+  if (VECTOR_MODE_P (mode))
+    {
+      gcc_assert (GET_MODE_INNER (mode) == QImode);
+
+      /* vec_duplicate_optab is a precondition to pick a vector mode for
+	 the memset expander.  */
+      insn_code icode = optab_handler (vec_duplicate_optab, mode);
+
+      target = targetm.gen_memset_scratch_rtx (mode);
+      class expand_operand ops[2];
+      create_output_operand (&ops[0], target, mode);
+      create_input_operand (&ops[1], (rtx) data, QImode);
+      expand_insn (icode, 2, ops);
+      if (!rtx_equal_p (target, ops[0].value))
+	emit_move_insn (target, ops[0].value);
+
+      return target;
+    }
+
   p = XALLOCAVEC (char, size);
   memset (p, 1, size);
-  coeff = c_readstr (p, mode);
+  /* Vector modes should be handled above.  */
+  coeff = c_readstr (p, as_a <scalar_int_mode> (mode));
 
   target = convert_to_mode (mode, (rtx) data, 1);
   target = expand_mult (mode, target, coeff, NULL_RTX, 1);
@@ -4113,7 +4222,7 @@ try_store_by_multiple_pieces (rtx to, rtx len, unsigned int ctz_len,
 			      &valc, align, true))
     return false;
 
-  rtx (*constfun) (void *, void *, HOST_WIDE_INT, scalar_int_mode);
+  by_pieces_constfn constfun;
   void *constfundata;
   if (val)
     {
--
cgit v1.1


From 81d6cdd335ffc60c216a020d5c99306f659377a2 Mon Sep 17 00:00:00 2001
From: Martin Sebor
Date: Fri, 6 Aug 2021 15:29:33 -0600
Subject: Move more code to new gimple-ssa-warn-access pass.

gcc/ChangeLog:

* builtins.c (expand_builtin_memchr): Move to gimple-ssa-warn-access.cc.
(expand_builtin_strcat): Same.
(expand_builtin_stpncpy): Same.
(expand_builtin_strncat): Same.
(check_read_access): Same.
(check_memop_access): Same.
(expand_builtin_strlen): Move checks to gimple-ssa-warn-access.cc.
(expand_builtin_strnlen): Same.
(expand_builtin_memcpy): Same.
(expand_builtin_memmove): Same.
(expand_builtin_mempcpy): Same.
(expand_builtin_strcpy): Same.
(expand_builtin_strcpy_args): Same.
(expand_builtin_stpcpy_1): Same.
(expand_builtin_strncpy): Same.
(expand_builtin_memset): Same.
(expand_builtin_bzero): Same.
(expand_builtin_strcmp): Same.
(expand_builtin_strncmp): Same.
(expand_builtin): Remove handlers.
(fold_builtin_strlen): Add a comment.
* builtins.h (check_access): Move to gimple-ssa-warn-access.cc.
* calls.c (maybe_warn_nonstring_arg): Same.
* diagnostic-spec.c (nowarn_spec_t::nowarn_spec_t): Add warning option.
* gimple-fold.c (gimple_fold_builtin_strcpy): Pass argument to callee.
(gimple_fold_builtin_stpcpy): Same. * gimple-ssa-warn-access.cc (has_location): New function. (get_location): Same. (get_callee_fndecl): Same. (call_nargs): Same. (call_arg): Same. (warn_string_no_nul): Define. (unterminated_array): Same. (check_nul_terminated_array): Same. (maybe_warn_nonstring_arg): Same. (maybe_warn_for_bound): Same. (warn_for_access): Same. (check_access): Same. (check_memop_access): Same. (check_read_access): Same. (warn_dealloc_offset): Use helper functions. (maybe_emit_free_warning): Same. (class pass_waccess): Add members. (check_strcat): New function. (check_strncat): New function. (check_stxcpy): New function. (check_stxncpy): New function. (check_strncmp): New function. (pass_waccess::check_builtin): New function. (pass_waccess::check): Call it. * gimple-ssa-warn-access.h (warn_string_no_nul): Move here from builtins.h. (maybe_warn_for_bound): Same. (check_access): Same. (check_memop_access): Same. (check_read_access): Same. * pointer-query.h (struct access_data): Define a ctor overload. gcc/testsuite/ChangeLog: * c-c++-common/Wsizeof-pointer-memaccess1.c: Also disable -Wstringop-overread. * c-c++-common/attr-nonstring-3.c: Adjust pattern of expected message. * gcc.dg/Warray-bounds-39.c: Add an xfail due to a known bug. * gcc.dg/Wstring-compare-3.c: Also disable -Wstringop-overread. * gcc.dg/attr-nonstring-2.c: Adjust pattern of expected message. * gcc.dg/attr-nonstring-4.c: Same. * gcc.dg/Wstringop-overread-6.c: New test. * gcc.dg/sso-14.c: Fix typos to avoid buffer overflow. --- gcc/builtins.c | 399 +-------------------------------------------------------- 1 file changed, 7 insertions(+), 392 deletions(-) (limited to 'gcc/builtins.c') diff --git a/gcc/builtins.c b/gcc/builtins.c index 2387b5d..d2be807f 100644 --- a/gcc/builtins.c +++ b/gcc/builtins.c @@ -131,7 +131,6 @@ static rtx expand_builtin_va_copy (tree); static rtx inline_expand_builtin_bytecmp (tree, rtx); static rtx expand_builtin_strcmp (tree, rtx); static rtx expand_builtin_strncmp (tree, rtx, machine_mode); -static rtx expand_builtin_memchr (tree, rtx); static rtx expand_builtin_memcpy (tree, rtx); static rtx expand_builtin_memory_copy_args (tree dest, tree src, tree len, rtx target, tree exp, @@ -140,12 +139,9 @@ static rtx expand_builtin_memory_copy_args (tree dest, tree src, tree len, static rtx expand_builtin_memmove (tree, rtx); static rtx expand_builtin_mempcpy (tree, rtx); static rtx expand_builtin_mempcpy_args (tree, tree, tree, rtx, tree, memop_ret); -static rtx expand_builtin_strcat (tree); static rtx expand_builtin_strcpy (tree, rtx); static rtx expand_builtin_strcpy_args (tree, tree, tree, rtx); static rtx expand_builtin_stpcpy (tree, rtx, machine_mode); -static rtx expand_builtin_stpncpy (tree, rtx); -static rtx expand_builtin_strncat (tree, rtx); static rtx expand_builtin_strncpy (tree, rtx); static rtx expand_builtin_memset (tree, rtx, machine_mode); static rtx expand_builtin_memset_args (tree, tree, tree, rtx, machine_mode, tree); @@ -186,7 +182,6 @@ static rtx expand_builtin_memory_chk (tree, rtx, machine_mode, static void maybe_emit_chk_warning (tree, enum built_in_function); static void maybe_emit_sprintf_chk_warning (tree, enum built_in_function); static tree fold_builtin_object_size (tree, tree); -static bool check_read_access (tree, tree, tree = NULL_TREE, int = 1); unsigned HOST_WIDE_INT target_newline; unsigned HOST_WIDE_INT target_percent; @@ -2957,8 +2952,6 @@ expand_builtin_strlen (tree exp, rtx target, return NULL_RTX; tree src = CALL_EXPR_ARG (exp, 0); - if 
(!check_read_access (exp, src)) - return NULL_RTX; /* If the length can be computed at compile-time, return it. */ if (tree len = c_strlen (src, 0)) @@ -3062,8 +3055,6 @@ expand_builtin_strnlen (tree exp, rtx target, machine_mode target_mode) if (!bound) return NULL_RTX; - check_read_access (exp, src, bound); - location_t loc = UNKNOWN_LOCATION; if (EXPR_HAS_LOCATION (exp)) loc = EXPR_LOCATION (exp); @@ -3201,65 +3192,6 @@ determine_block_size (tree len, rtx len_rtx, GET_MODE_MASK (GET_MODE (len_rtx))); } -/* A convenience wrapper for check_access above to check access - by a read-only function like puts. */ - -static bool -check_read_access (tree exp, tree src, tree bound /* = NULL_TREE */, - int ost /* = 1 */) -{ - if (!warn_stringop_overread) - return true; - - if (bound && !useless_type_conversion_p (size_type_node, TREE_TYPE (bound))) - bound = fold_convert (size_type_node, bound); - access_data data (exp, access_read_only, NULL_TREE, false, bound, true); - compute_objsize (src, ost, &data.src); - return check_access (exp, /*dstwrite=*/ NULL_TREE, /*maxread=*/ bound, - /*srcstr=*/ src, /*dstsize=*/ NULL_TREE, data.mode, - &data); -} - -/* Helper to determine and check the sizes of the source and the destination - of calls to __builtin_{bzero,memcpy,mempcpy,memset} calls. EXP is the - call expression, DEST is the destination argument, SRC is the source - argument or null, and LEN is the number of bytes. Use Object Size type-0 - regardless of the OPT_Wstringop_overflow_ setting. Return true on success - (no overflow or invalid sizes), false otherwise. */ - -static bool -check_memop_access (tree exp, tree dest, tree src, tree size) -{ - /* For functions like memset and memcpy that operate on raw memory - try to determine the size of the largest source and destination - object using type-0 Object Size regardless of the object size - type specified by the option. */ - access_data data (exp, access_read_write); - tree srcsize = src ? compute_objsize (src, 0, &data.src) : NULL_TREE; - tree dstsize = compute_objsize (dest, 0, &data.dst); - - return check_access (exp, size, /*maxread=*/NULL_TREE, - srcsize, dstsize, data.mode, &data); -} - -/* Validate memchr arguments without performing any expansion. - Return NULL_RTX. */ - -static rtx -expand_builtin_memchr (tree exp, rtx) -{ - if (!validate_arglist (exp, - POINTER_TYPE, INTEGER_TYPE, INTEGER_TYPE, VOID_TYPE)) - return NULL_RTX; - - tree arg1 = CALL_EXPR_ARG (exp, 0); - tree len = CALL_EXPR_ARG (exp, 2); - - check_read_access (exp, arg1, len, 0); - - return NULL_RTX; -} - /* Expand a call EXP to the memcpy builtin. Return NULL_RTX if we failed, the caller should emit a normal call, otherwise try to get the result in TARGET, if convenient (and in @@ -3276,8 +3208,6 @@ expand_builtin_memcpy (tree exp, rtx target) tree src = CALL_EXPR_ARG (exp, 1); tree len = CALL_EXPR_ARG (exp, 2); - check_memop_access (exp, dest, src, len); - return expand_builtin_memory_copy_args (dest, src, len, target, exp, /*retmode=*/ RETURN_BEGIN, false); } @@ -3296,8 +3226,6 @@ expand_builtin_memmove (tree exp, rtx target) tree src = CALL_EXPR_ARG (exp, 1); tree len = CALL_EXPR_ARG (exp, 2); - check_memop_access (exp, dest, src, len); - return expand_builtin_memory_copy_args (dest, src, len, target, exp, /*retmode=*/ RETURN_BEGIN, true); } @@ -3334,8 +3262,6 @@ expand_builtin_mempcpy (tree exp, rtx target) /* Avoid expanding mempcpy into memcpy when the call is determined to overflow the buffer. 
This also prevents the same overflow from being diagnosed again when expanding memcpy. */ - if (!check_memop_access (exp, dest, src, len)) - return NULL_RTX; return expand_builtin_mempcpy_args (dest, src, len, target, exp, /*retmode=*/ RETURN_END); @@ -3511,36 +3437,6 @@ expand_movstr (tree dest, tree src, rtx target, memop_ret retmode) return target; } -/* Do some very basic size validation of a call to the strcpy builtin - given by EXP. Return NULL_RTX to have the built-in expand to a call - to the library function. */ - -static rtx -expand_builtin_strcat (tree exp) -{ - if (!validate_arglist (exp, POINTER_TYPE, POINTER_TYPE, VOID_TYPE) - || !warn_stringop_overflow) - return NULL_RTX; - - tree dest = CALL_EXPR_ARG (exp, 0); - tree src = CALL_EXPR_ARG (exp, 1); - - /* There is no way here to determine the length of the string in - the destination to which the SRC string is being appended so - just diagnose cases when the souce string is longer than - the destination object. */ - access_data data (exp, access_read_write, NULL_TREE, true, - NULL_TREE, true); - const int ost = warn_stringop_overflow ? warn_stringop_overflow - 1 : 1; - compute_objsize (src, ost, &data.src); - tree destsize = compute_objsize (dest, ost, &data.dst); - - check_access (exp, /*dstwrite=*/NULL_TREE, /*maxread=*/NULL_TREE, - src, destsize, data.mode, &data); - - return NULL_RTX; -} - /* Expand expression EXP, which is a call to the strcpy builtin. Return NULL_RTX if we failed the caller should emit a normal call, otherwise try to get the result in TARGET, if convenient (and in mode MODE if that's @@ -3555,29 +3451,7 @@ expand_builtin_strcpy (tree exp, rtx target) tree dest = CALL_EXPR_ARG (exp, 0); tree src = CALL_EXPR_ARG (exp, 1); - if (warn_stringop_overflow) - { - access_data data (exp, access_read_write, NULL_TREE, true, - NULL_TREE, true); - const int ost = warn_stringop_overflow ? warn_stringop_overflow - 1 : 1; - compute_objsize (src, ost, &data.src); - tree dstsize = compute_objsize (dest, ost, &data.dst); - check_access (exp, /*dstwrite=*/ NULL_TREE, - /*maxread=*/ NULL_TREE, /*srcstr=*/ src, - dstsize, data.mode, &data); - } - - if (rtx ret = expand_builtin_strcpy_args (exp, dest, src, target)) - { - /* Check to see if the argument was declared attribute nonstring - and if so, issue a warning since at this point it's not known - to be nul-terminated. */ - tree fndecl = get_callee_fndecl (exp); - maybe_warn_nonstring_arg (fndecl, exp); - return ret; - } - - return NULL_RTX; + return expand_builtin_strcpy_args (exp, dest, src, target); } /* Helper function to do the actual work for expand_builtin_strcpy. The @@ -3587,19 +3461,8 @@ expand_builtin_strcpy (tree exp, rtx target) expand_builtin_strcpy. */ static rtx -expand_builtin_strcpy_args (tree exp, tree dest, tree src, rtx target) +expand_builtin_strcpy_args (tree, tree dest, tree src, rtx target) { - /* Detect strcpy calls with unterminated arrays.. */ - tree size; - bool exact; - if (tree nonstr = unterminated_array (src, &size, &exact)) - { - /* NONSTR refers to the non-nul terminated constant array. 
*/ - warn_string_no_nul (EXPR_LOCATION (exp), exp, NULL, src, nonstr, - size, exact); - return NULL_RTX; - } - return expand_movstr (dest, src, target, /*retmode=*/ RETURN_BEGIN); } @@ -3620,15 +3483,6 @@ expand_builtin_stpcpy_1 (tree exp, rtx target, machine_mode mode) dst = CALL_EXPR_ARG (exp, 0); src = CALL_EXPR_ARG (exp, 1); - if (warn_stringop_overflow) - { - access_data data (exp, access_read_write); - tree destsize = compute_objsize (dst, warn_stringop_overflow - 1, - &data.dst); - check_access (exp, /*dstwrite=*/NULL_TREE, /*maxread=*/NULL_TREE, - src, destsize, data.mode, &data); - } - /* If return value is ignored, transform stpcpy into strcpy. */ if (target == const0_rtx && builtin_decl_implicit (BUILT_IN_STRCPY)) { @@ -3651,9 +3505,6 @@ expand_builtin_stpcpy_1 (tree exp, rtx target, machine_mode mode) return expand_movstr (dst, src, target, /*retmode=*/ RETURN_END_MINUS_ONE); - if (lendata.decl) - warn_string_no_nul (EXPR_LOCATION (exp), exp, NULL, src, lendata.decl); - lenp1 = size_binop_loc (loc, PLUS_EXPR, len, ssize_int (1)); ret = expand_builtin_mempcpy_args (dst, src, lenp1, target, exp, @@ -3715,30 +3566,6 @@ expand_builtin_stpcpy (tree exp, rtx target, machine_mode mode) return NULL_RTX; } -/* Check a call EXP to the stpncpy built-in for validity. - Return NULL_RTX on both success and failure. */ - -static rtx -expand_builtin_stpncpy (tree exp, rtx) -{ - if (!validate_arglist (exp, - POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE) - || !warn_stringop_overflow) - return NULL_RTX; - - /* The source and destination of the call. */ - tree dest = CALL_EXPR_ARG (exp, 0); - tree src = CALL_EXPR_ARG (exp, 1); - - /* The exact number of bytes to write (not the maximum). */ - tree len = CALL_EXPR_ARG (exp, 2); - access_data data (exp, access_read_write); - /* The size of the destination object. */ - tree destsize = compute_objsize (dest, warn_stringop_overflow - 1, &data.dst); - check_access (exp, len, /*maxread=*/len, src, destsize, data.mode, &data); - return NULL_RTX; -} - /* Callback routine for store_by_pieces. Read GET_MODE_BITSIZE (MODE) bytes from constant string DATA + OFFSET and return it as target constant. */ @@ -3817,78 +3644,6 @@ check_strncat_sizes (tree exp, tree objsize) objsize, data.mode, &data); } -/* Similar to expand_builtin_strcat, do some very basic size validation - of a call to the strcpy builtin given by EXP. Return NULL_RTX to have - the built-in expand to a call to the library function. */ - -static rtx -expand_builtin_strncat (tree exp, rtx) -{ - if (!validate_arglist (exp, - POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE) - || !warn_stringop_overflow) - return NULL_RTX; - - tree dest = CALL_EXPR_ARG (exp, 0); - tree src = CALL_EXPR_ARG (exp, 1); - /* The upper bound on the number of bytes to write. */ - tree maxread = CALL_EXPR_ARG (exp, 2); - - /* Detect unterminated source (only). */ - if (!check_nul_terminated_array (exp, src, maxread)) - return NULL_RTX; - - /* The length of the source sequence. */ - tree slen = c_strlen (src, 1); - - /* Try to determine the range of lengths that the source expression - refers to. Since the lengths are only used for warning and not - for code generation disable strict mode below. */ - tree maxlen = slen; - if (!maxlen) - { - c_strlen_data lendata = { }; - get_range_strlen (src, &lendata, /* eltsize = */ 1); - maxlen = lendata.maxbound; - } - - access_data data (exp, access_read_write); - /* Try to verify that the destination is big enough for the shortest - string. 
First try to determine the size of the destination object - into which the source is being copied. */ - tree destsize = compute_objsize (dest, warn_stringop_overflow - 1, &data.dst); - - /* Add one for the terminating nul. */ - tree srclen = (maxlen - ? fold_build2 (PLUS_EXPR, size_type_node, maxlen, - size_one_node) - : NULL_TREE); - - /* The strncat function copies at most MAXREAD bytes and always appends - the terminating nul so the specified upper bound should never be equal - to (or greater than) the size of the destination. */ - if (tree_fits_uhwi_p (maxread) && tree_fits_uhwi_p (destsize) - && tree_int_cst_equal (destsize, maxread)) - { - location_t loc = EXPR_LOCATION (exp); - warning_at (loc, OPT_Wstringop_overflow_, - "%qD specified bound %E equals destination size", - get_callee_fndecl (exp), maxread); - - return NULL_RTX; - } - - if (!srclen - || (maxread && tree_fits_uhwi_p (maxread) - && tree_fits_uhwi_p (srclen) - && tree_int_cst_lt (maxread, srclen))) - srclen = maxread; - - check_access (exp, /*dstwrite=*/NULL_TREE, maxread, srclen, - destsize, data.mode, &data); - return NULL_RTX; -} - /* Expand expression EXP, which is a call to the strncpy builtin. Return NULL_RTX if we failed the caller should emit a normal call. */ @@ -3908,18 +3663,6 @@ expand_builtin_strncpy (tree exp, rtx target) /* The length of the source sequence. */ tree slen = c_strlen (src, 1); - if (warn_stringop_overflow) - { - access_data data (exp, access_read_write, len, true, len, true); - const int ost = warn_stringop_overflow ? warn_stringop_overflow - 1 : 1; - compute_objsize (src, ost, &data.src); - tree dstsize = compute_objsize (dest, ost, &data.dst); - /* The number of bytes to write is LEN but check_access will also - check SLEN if LEN's value isn't known. */ - check_access (exp, /*dstwrite=*/len, - /*maxread=*/len, src, dstsize, data.mode, &data); - } - /* We must be passed a constant len and src parameter. */ if (!tree_fits_uhwi_p (len) || !slen || !tree_fits_uhwi_p (slen)) return NULL_RTX; @@ -4141,8 +3884,6 @@ expand_builtin_memset (tree exp, rtx target, machine_mode mode) tree val = CALL_EXPR_ARG (exp, 1); tree len = CALL_EXPR_ARG (exp, 2); - check_memop_access (exp, dest, NULL_TREE, len); - return expand_builtin_memset_args (dest, val, len, target, mode, exp); } @@ -4470,8 +4211,6 @@ expand_builtin_bzero (tree exp) tree dest = CALL_EXPR_ARG (exp, 0); tree size = CALL_EXPR_ARG (exp, 1); - check_memop_access (exp, dest, NULL_TREE, size); - /* New argument list transforming bzero(ptr x, int y) to memset(ptr x, int 0, size_t y). This is done this way so that if it isn't expanded inline, we fallback to @@ -4622,10 +4361,6 @@ expand_builtin_strcmp (tree exp, ATTRIBUTE_UNUSED rtx target) tree arg1 = CALL_EXPR_ARG (exp, 0); tree arg2 = CALL_EXPR_ARG (exp, 1); - if (!check_read_access (exp, arg1) - || !check_read_access (exp, arg2)) - return NULL_RTX; - /* Due to the performance benefit, always inline the calls first. */ rtx result = NULL_RTX; result = inline_expand_builtin_bytecmp (exp, target); @@ -4707,11 +4442,6 @@ expand_builtin_strcmp (tree exp, ATTRIBUTE_UNUSED rtx target) tree fndecl = get_callee_fndecl (exp); if (result) { - /* Check to see if the argument was declared attribute nonstring - and if so, issue a warning since at this point it's not known - to be nul-terminated. */ - maybe_warn_nonstring_arg (fndecl, exp); - /* Return the value in the proper mode for this function. 
*/ machine_mode mode = TYPE_MODE (TREE_TYPE (exp)); if (GET_MODE (result) == mode) @@ -4725,6 +4455,7 @@ expand_builtin_strcmp (tree exp, ATTRIBUTE_UNUSED rtx target) /* Expand the library call ourselves using a stabilized argument list to avoid re-evaluating the function's arguments twice. */ tree fn = build_call_nofold_loc (EXPR_LOCATION (exp), fndecl, 2, arg1, arg2); + copy_warning (fn, exp); gcc_assert (TREE_CODE (fn) == CALL_EXPR); CALL_EXPR_TAILCALL (fn) = CALL_EXPR_TAILCALL (exp); return expand_call (fn, target, target == const0_rtx); @@ -4746,66 +4477,10 @@ expand_builtin_strncmp (tree exp, ATTRIBUTE_UNUSED rtx target, tree arg2 = CALL_EXPR_ARG (exp, 1); tree arg3 = CALL_EXPR_ARG (exp, 2); - if (!check_nul_terminated_array (exp, arg1, arg3) - || !check_nul_terminated_array (exp, arg2, arg3)) - return NULL_RTX; - location_t loc = EXPR_LOCATION (exp); tree len1 = c_strlen (arg1, 1); tree len2 = c_strlen (arg2, 1); - if (!len1 || !len2) - { - /* Check to see if the argument was declared attribute nonstring - and if so, issue a warning since at this point it's not known - to be nul-terminated. */ - if (!maybe_warn_nonstring_arg (get_callee_fndecl (exp), exp) - && !len1 && !len2) - { - /* A strncmp read is constrained not just by the bound but - also by the length of the shorter string. Specifying - a bound that's larger than the size of either array makes - no sense and is likely a bug. When the length of neither - of the two strings is known but the sizes of both of - the arrays they are stored in is, issue a warning if - the bound is larger than than the size of the larger - of the two arrays. */ - - access_ref ref1 (arg3, true); - access_ref ref2 (arg3, true); - - tree bndrng[2] = { NULL_TREE, NULL_TREE }; - get_size_range (arg3, bndrng, ref1.bndrng); - - tree size1 = compute_objsize (arg1, 1, &ref1); - tree size2 = compute_objsize (arg2, 1, &ref2); - tree func = get_callee_fndecl (exp); - - if (size1 && size2 && bndrng[0] && !integer_zerop (bndrng[0])) - { - offset_int rem1 = ref1.size_remaining (); - offset_int rem2 = ref2.size_remaining (); - if (rem1 == 0 || rem2 == 0) - maybe_warn_for_bound (OPT_Wstringop_overread, loc, exp, func, - bndrng, integer_zero_node); - else - { - offset_int maxrem = wi::max (rem1, rem2, UNSIGNED); - if (maxrem < wi::to_offset (bndrng[0])) - maybe_warn_for_bound (OPT_Wstringop_overread, loc, exp, - func, bndrng, - wide_int_to_tree (sizetype, maxrem)); - } - } - else if (bndrng[0] - && !integer_zerop (bndrng[0]) - && ((size1 && integer_zerop (size1)) - || (size2 && integer_zerop (size2)))) - maybe_warn_for_bound (OPT_Wstringop_overread, loc, exp, func, - bndrng, integer_zero_node); - } - } - /* Due to the performance benefit, always inline the calls first. 
*/ rtx result = NULL_RTX; result = inline_expand_builtin_bytecmp (exp, target); @@ -7544,63 +7219,12 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode, return target; break; - case BUILT_IN_STRCAT: - target = expand_builtin_strcat (exp); - if (target) - return target; - break; - - case BUILT_IN_GETTEXT: - case BUILT_IN_PUTS: - case BUILT_IN_PUTS_UNLOCKED: - case BUILT_IN_STRDUP: - if (validate_arglist (exp, POINTER_TYPE, VOID_TYPE)) - check_read_access (exp, CALL_EXPR_ARG (exp, 0)); - break; - - case BUILT_IN_INDEX: - case BUILT_IN_RINDEX: - case BUILT_IN_STRCHR: - case BUILT_IN_STRRCHR: - if (validate_arglist (exp, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) - check_read_access (exp, CALL_EXPR_ARG (exp, 0)); - break; - - case BUILT_IN_FPUTS: - case BUILT_IN_FPUTS_UNLOCKED: - if (validate_arglist (exp, POINTER_TYPE, POINTER_TYPE, VOID_TYPE)) - check_read_access (exp, CALL_EXPR_ARG (exp, 0)); - break; - - case BUILT_IN_STRNDUP: - if (validate_arglist (exp, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) - check_read_access (exp, CALL_EXPR_ARG (exp, 0), CALL_EXPR_ARG (exp, 1)); - break; - - case BUILT_IN_STRCASECMP: - case BUILT_IN_STRPBRK: - case BUILT_IN_STRSPN: - case BUILT_IN_STRCSPN: - case BUILT_IN_STRSTR: - if (validate_arglist (exp, POINTER_TYPE, POINTER_TYPE, VOID_TYPE)) - { - check_read_access (exp, CALL_EXPR_ARG (exp, 0)); - check_read_access (exp, CALL_EXPR_ARG (exp, 1)); - } - break; - case BUILT_IN_STRCPY: target = expand_builtin_strcpy (exp, target); if (target) return target; break; - case BUILT_IN_STRNCAT: - target = expand_builtin_strncat (exp, target); - if (target) - return target; - break; - case BUILT_IN_STRNCPY: target = expand_builtin_strncpy (exp, target); if (target) @@ -7613,18 +7237,6 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode, return target; break; - case BUILT_IN_STPNCPY: - target = expand_builtin_stpncpy (exp, target); - if (target) - return target; - break; - - case BUILT_IN_MEMCHR: - target = expand_builtin_memchr (exp, target); - if (target) - return target; - break; - case BUILT_IN_MEMCPY: target = expand_builtin_memcpy (exp, target); if (target) @@ -8626,8 +8238,11 @@ fold_builtin_strlen (location_t loc, tree expr, tree type, tree arg) if (len) return fold_convert_loc (loc, type, len); + /* TODO: Move this to gimple-ssa-warn-access once the pass runs + also early enough to detect invalid reads in multimensional + arrays and struct members. */ if (!lendata.decl) - c_strlen (arg, 1, &lendata); + c_strlen (arg, 1, &lendata); if (lendata.decl) { -- cgit v1.1 From b48d4e6818674898f90d9358378c127511ef0f9f Mon Sep 17 00:00:00 2001 From: Martin Sebor Date: Tue, 17 Aug 2021 14:49:05 -0600 Subject: Move more warning code to gimple-ssa-warn-access etc. Also resolves: PR middle-end/101854 - Invalid warning -Wstringop-overflow wrong argument gcc/ChangeLog: PR middle-end/101854 * builtins.c (expand_builtin_alloca): Move warning code to check_alloca in gimple-ssa-warn-access.cc. * calls.c (alloc_max_size): Move code to check_alloca. (get_size_range): Move to pointer-query.cc. (maybe_warn_alloc_args_overflow): Move to gimple-ssa-warn-access.cc. (get_attr_nonstring_decl): Move to tree.c. (fntype_argno_type): Move to gimple-ssa-warn-access.cc. (append_attrname): Same. (maybe_warn_rdwr_sizes): Same. (initialize_argument_information): Move code to gimple-ssa-warn-access.cc. * calls.h (maybe_warn_alloc_args_overflow): Move to gimple-ssa-warn-access.h. (get_attr_nonstring_decl): Move to tree.h. 
(maybe_warn_nonstring_arg): Move to gimple-ssa-warn-access.h. (enum size_range_flags): Move to pointer-query.h. (get_size_range): Same. * gimple-ssa-warn-access.cc (has_location): Remove unused overload to avoid Clang -Wunused-function. (get_size_range): Declare static. (maybe_emit_free_warning): Rename... (maybe_check_dealloc_call): ...to this for consistency. (class pass_waccess): Add members. (pass_waccess::~pass_waccess): Defined. (alloc_max_size): Move here from calls.c. (maybe_warn_alloc_args_overflow): Same. (check_alloca): New function. (check_alloc_size_call): New function. (check_strncat): Handle another warning flag. (pass_waccess::check_builtin): Handle alloca. (fntype_argno_type): Move here from calls.c. (append_attrname): Same. (maybe_warn_rdwr_sizes): Same. (pass_waccess::check_call): Define. (check_nonstring_args): New function. (pass_waccess::check): Call new member functions. (pass_waccess::execute): Enable ranger. * gimple-ssa-warn-access.h (get_size_range): Move here from calls.h. (maybe_warn_nonstring_arg): Same. * gimple-ssa-warn-restrict.c: Remove #include. * pointer-query.cc (get_size_range): Move here from calls.c. * pointer-query.h (enum size_range_flags): Same. (get_size_range): Same. * tree.c (get_attr_nonstring_decl): Move here from calls.c. * tree.h (get_attr_nonstring_decl): Move here from calls.h. gcc/testsuite/ChangeLog: * gcc.dg/attr-alloc_size-5.c: Adjust optimization to -O1. * gcc.dg/attr-alloc_size-7.c: Use #pragmas to adjust optimization. * gcc.dg/attr-alloc_size-8.c: Adjust optimization to -O1. PR middle-end/101854 * gcc.dg/Wstringop-overflow-72.c: New test. --- gcc/builtins.c | 22 +--------------------- 1 file changed, 1 insertion(+), 21 deletions(-) (limited to 'gcc/builtins.c') diff --git a/gcc/builtins.c b/gcc/builtins.c index d2be807f..9954862 100644 --- a/gcc/builtins.c +++ b/gcc/builtins.c @@ -43,7 +43,7 @@ along with GCC; see the file COPYING3. If not see #include "alias.h" #include "fold-const.h" #include "fold-const-call.h" -#include "gimple-ssa-warn-restrict.h" +#include "gimple-ssa-warn-access.h" #include "stor-layout.h" #include "calls.h" #include "varasm.h" @@ -81,7 +81,6 @@ along with GCC; see the file COPYING3. If not see #include "demangle.h" #include "gimple-range.h" #include "pointer-query.h" -#include "gimple-ssa-warn-access.h" struct target_builtins default_target_builtins; #if SWITCHABLE_TARGET @@ -4896,25 +4895,6 @@ expand_builtin_alloca (tree exp) if (!valid_arglist) return NULL_RTX; - if ((alloca_for_var - && warn_vla_limit >= HOST_WIDE_INT_MAX - && warn_alloc_size_limit < warn_vla_limit) - || (!alloca_for_var - && warn_alloca_limit >= HOST_WIDE_INT_MAX - && warn_alloc_size_limit < warn_alloca_limit - )) - { - /* -Walloca-larger-than and -Wvla-larger-than settings of - less than HOST_WIDE_INT_MAX override the more general - -Walloc-size-larger-than so unless either of the former - options is smaller than the last one (wchich would imply - that the call was already checked), check the alloca - arguments for overflow. */ - tree args[] = { CALL_EXPR_ARG (exp, 0), NULL_TREE }; - int idx[] = { 0, -1 }; - maybe_warn_alloc_args_overflow (fndecl, exp, args, idx); - } - /* Compute the argument. */ op0 = expand_normal (CALL_EXPR_ARG (exp, 0)); -- cgit v1.1 From a25e0b5e6ac8a77a71c229e0a7b744603365b0e9 Mon Sep 17 00:00:00 2001 From: qing zhao Date: Thu, 9 Sep 2021 15:44:49 -0700 Subject: Add -ftrivial-auto-var-init option and uninitialized variable attribute. 
Initialize automatic variables with either a pattern or with zeroes to
increase the security and predictability of a program by preventing
uninitialized memory disclosure and use.

GCC still considers an automatic variable that doesn't have an explicit
initializer as uninitialized, and -Wuninitialized will still report
warnings for such automatic variables.

With this option, GCC will also initialize any padding of automatic
variables that have structure or union types to zeroes.

You can override this behavior for a specific variable by marking it
with the variable attribute "uninitialized", for example to limit the
runtime overhead.

gcc/ChangeLog:

2021-09-09  qing zhao

* builtins.c (expand_builtin_memset): Make externally visible.
* builtins.h (expand_builtin_memset): Declare extern.
* common.opt (ftrivial-auto-var-init=): New option.
* doc/extend.texi: Document the uninitialized attribute.
* doc/invoke.texi: Document -ftrivial-auto-var-init.
* flag-types.h (enum auto_init_type): New enumerated type
auto_init_type.
* gimple-fold.c (clear_padding_type): Add one new parameter.
(clear_padding_union): Likewise.
(clear_padding_emit_loop): Likewise.
(clear_type_padding_in_mask): Likewise.
(gimple_fold_builtin_clear_padding): Handle this new parameter.
* gimplify.c (gimple_add_init_for_auto_var): New function.
(gimple_add_padding_init_for_auto_var): New function.
(is_var_need_auto_init): New function.
(gimplify_decl_expr): Add initialization to automatic variables per
users' requests.
(gimplify_call_expr): Add one new parameter for call to
__builtin_clear_padding.
(gimplify_init_constructor): Add padding initialization in the end.
* internal-fn.c (INIT_PATTERN_VALUE): New macro.
(expand_DEFERRED_INIT): New function.
* internal-fn.def (DEFERRED_INIT): New internal function.
* tree-cfg.c (verify_gimple_call): Verify calls to .DEFERRED_INIT.
* tree-sra.c (generate_subtree_deferred_init): New function.
(scan_function): Avoid setting cannot_scalarize_away_bitmap for
calls to .DEFERRED_INIT.
(sra_modify_deferred_init): New function.
(sra_modify_function_body): Handle calls to DEFERRED_INIT specially.
* tree-ssa-structalias.c (find_func_aliases_for_call): Likewise.
* tree-ssa-uninit.c (warn_uninit): Handle calls to DEFERRED_INIT
specially.
(check_defs): Likewise.
(warn_uninitialized_vars): Likewise.
* tree-ssa.c (ssa_undefined_value_p): Likewise.
* tree.c (build_common_builtin_nodes): Build tree node for
BUILT_IN_CLEAR_PADDING when needed.

gcc/c-family/ChangeLog:

2021-09-09  qing zhao

* c-attribs.c (handle_uninitialized_attribute): New function.
(c_common_attribute_table): Add "uninitialized" attribute.

gcc/testsuite/ChangeLog:

2021-09-09  qing zhao

* c-c++-common/auto-init-1.c: New test.
* c-c++-common/auto-init-10.c: New test.
* c-c++-common/auto-init-11.c: New test.
* c-c++-common/auto-init-12.c: New test.
* c-c++-common/auto-init-13.c: New test.
* c-c++-common/auto-init-14.c: New test.
* c-c++-common/auto-init-15.c: New test.
* c-c++-common/auto-init-16.c: New test.
* c-c++-common/auto-init-2.c: New test.
* c-c++-common/auto-init-3.c: New test.
* c-c++-common/auto-init-4.c: New test.
* c-c++-common/auto-init-5.c: New test.
* c-c++-common/auto-init-6.c: New test.
* c-c++-common/auto-init-7.c: New test.
* c-c++-common/auto-init-8.c: New test.
* c-c++-common/auto-init-9.c: New test.
* c-c++-common/auto-init-esra.c: New test.
* c-c++-common/auto-init-padding-1.c: New test.
* c-c++-common/auto-init-padding-2.c: New test.
* c-c++-common/auto-init-padding-3.c: New test.
* g++.dg/auto-init-uninit-pred-1_a.C: New test.
* g++.dg/auto-init-uninit-pred-2_a.C: New test. * g++.dg/auto-init-uninit-pred-3_a.C: New test. * g++.dg/auto-init-uninit-pred-4.C: New test. * gcc.dg/auto-init-sra-1.c: New test. * gcc.dg/auto-init-sra-2.c: New test. * gcc.dg/auto-init-uninit-1.c: New test. * gcc.dg/auto-init-uninit-12.c: New test. * gcc.dg/auto-init-uninit-13.c: New test. * gcc.dg/auto-init-uninit-14.c: New test. * gcc.dg/auto-init-uninit-15.c: New test. * gcc.dg/auto-init-uninit-16.c: New test. * gcc.dg/auto-init-uninit-17.c: New test. * gcc.dg/auto-init-uninit-18.c: New test. * gcc.dg/auto-init-uninit-19.c: New test. * gcc.dg/auto-init-uninit-2.c: New test. * gcc.dg/auto-init-uninit-20.c: New test. * gcc.dg/auto-init-uninit-21.c: New test. * gcc.dg/auto-init-uninit-22.c: New test. * gcc.dg/auto-init-uninit-23.c: New test. * gcc.dg/auto-init-uninit-24.c: New test. * gcc.dg/auto-init-uninit-25.c: New test. * gcc.dg/auto-init-uninit-26.c: New test. * gcc.dg/auto-init-uninit-3.c: New test. * gcc.dg/auto-init-uninit-34.c: New test. * gcc.dg/auto-init-uninit-36.c: New test. * gcc.dg/auto-init-uninit-37.c: New test. * gcc.dg/auto-init-uninit-4.c: New test. * gcc.dg/auto-init-uninit-5.c: New test. * gcc.dg/auto-init-uninit-6.c: New test. * gcc.dg/auto-init-uninit-8.c: New test. * gcc.dg/auto-init-uninit-9.c: New test. * gcc.dg/auto-init-uninit-A.c: New test. * gcc.dg/auto-init-uninit-B.c: New test. * gcc.dg/auto-init-uninit-C.c: New test. * gcc.dg/auto-init-uninit-H.c: New test. * gcc.dg/auto-init-uninit-I.c: New test. * gcc.target/aarch64/auto-init-1.c: New test. * gcc.target/aarch64/auto-init-2.c: New test. * gcc.target/aarch64/auto-init-3.c: New test. * gcc.target/aarch64/auto-init-4.c: New test. * gcc.target/aarch64/auto-init-5.c: New test. * gcc.target/aarch64/auto-init-6.c: New test. * gcc.target/aarch64/auto-init-7.c: New test. * gcc.target/aarch64/auto-init-8.c: New test. * gcc.target/aarch64/auto-init-padding-1.c: New test. * gcc.target/aarch64/auto-init-padding-10.c: New test. * gcc.target/aarch64/auto-init-padding-11.c: New test. * gcc.target/aarch64/auto-init-padding-12.c: New test. * gcc.target/aarch64/auto-init-padding-2.c: New test. * gcc.target/aarch64/auto-init-padding-3.c: New test. * gcc.target/aarch64/auto-init-padding-4.c: New test. * gcc.target/aarch64/auto-init-padding-5.c: New test. * gcc.target/aarch64/auto-init-padding-6.c: New test. * gcc.target/aarch64/auto-init-padding-7.c: New test. * gcc.target/aarch64/auto-init-padding-8.c: New test. * gcc.target/aarch64/auto-init-padding-9.c: New test. * gcc.target/i386/auto-init-1.c: New test. * gcc.target/i386/auto-init-2.c: New test. * gcc.target/i386/auto-init-21.c: New test. * gcc.target/i386/auto-init-22.c: New test. * gcc.target/i386/auto-init-23.c: New test. * gcc.target/i386/auto-init-24.c: New test. * gcc.target/i386/auto-init-3.c: New test. * gcc.target/i386/auto-init-4.c: New test. * gcc.target/i386/auto-init-5.c: New test. * gcc.target/i386/auto-init-6.c: New test. * gcc.target/i386/auto-init-7.c: New test. * gcc.target/i386/auto-init-8.c: New test. * gcc.target/i386/auto-init-padding-1.c: New test. * gcc.target/i386/auto-init-padding-10.c: New test. * gcc.target/i386/auto-init-padding-11.c: New test. * gcc.target/i386/auto-init-padding-12.c: New test. * gcc.target/i386/auto-init-padding-2.c: New test. * gcc.target/i386/auto-init-padding-3.c: New test. * gcc.target/i386/auto-init-padding-4.c: New test. * gcc.target/i386/auto-init-padding-5.c: New test. * gcc.target/i386/auto-init-padding-6.c: New test. 
* gcc.target/i386/auto-init-padding-7.c: New test. * gcc.target/i386/auto-init-padding-8.c: New test. * gcc.target/i386/auto-init-padding-9.c: New test. --- gcc/builtins.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'gcc/builtins.c') diff --git a/gcc/builtins.c b/gcc/builtins.c index 9954862..3e57eb0 100644 --- a/gcc/builtins.c +++ b/gcc/builtins.c @@ -142,7 +142,6 @@ static rtx expand_builtin_strcpy (tree, rtx); static rtx expand_builtin_strcpy_args (tree, tree, tree, rtx); static rtx expand_builtin_stpcpy (tree, rtx, machine_mode); static rtx expand_builtin_strncpy (tree, rtx); -static rtx expand_builtin_memset (tree, rtx, machine_mode); static rtx expand_builtin_memset_args (tree, tree, tree, rtx, machine_mode, tree); static rtx expand_builtin_bzero (tree); static rtx expand_builtin_strlen (tree, rtx, machine_mode); @@ -3872,7 +3871,7 @@ builtin_memset_gen_str (void *data, void *prev, try to get the result in TARGET, if convenient (and in mode MODE if that's convenient). */ -static rtx +rtx expand_builtin_memset (tree exp, rtx target, machine_mode mode) { if (!validate_arglist (exp, -- cgit v1.1
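
The two memset callbacks in the by-pieces commit above
(builtin_memset_read_str and builtin_memset_gen_str) implement the same
basic trick: broadcast the fill byte into one wide value, then let the
by-pieces machinery issue full-width stores.  The following stand-alone
C sketch illustrates that shape outside of GCC; it is not GCC code, the
function name is made up, a 64-bit integer stands in for the QImode
vector register, and it assumes n >= 8 so that an overlapping tail
store is always legal:

    #include <stdint.h>
    #include <string.h>

    /* Illustration only: broadcast C into a wide value, store it in
       full-width pieces, and finish with one overlapping store for
       the tail instead of a chain of narrower stores.  Assumes N >= 8.  */
    static void
    memset_by_pieces_sketch (unsigned char *dst, int c, size_t n)
    {
      /* Byte broadcast, the integer analogue of vec_duplicate.  */
      uint64_t v = (unsigned char) c * UINT64_C (0x0101010101010101);

      size_t i = 0;
      for (; i + 8 <= n; i += 8)
        memcpy (dst + i, &v, 8);        /* full-width pieces */

      if (i < n)
        memcpy (dst + n - 8, &v, 8);    /* overlapping final piece */
    }

The overlapping tail mirrors the overlap_op_by_pieces_p hook from the
earlier by-pieces work: rewriting a few already-written bytes is
normally cheaper than issuing 4-, 2- and 1-byte stores for the
remainder.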
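
For the -ftrivial-auto-var-init commit above, a small user-level
example may help.  The option values =zero and =pattern and the
attribute name are taken from the commit message; the file name, build
line, and exact diagnostics are only illustrative:

    /* t.c: build with
       gcc -O1 -ftrivial-auto-var-init=zero -Wuninitialized -c t.c  */

    struct s
    {
      char c;   /* followed by 3 bytes of padding, also zeroed */
      int i;
    };

    int
    f (void)
    {
      struct s a;   /* no initializer: filled with zeroes under =zero
                       (or a repeated pattern under =pattern),
                       padding included */

      /* Opt out per variable where the initialization cost matters.  */
      int scratch[256] __attribute__ ((uninitialized));
      (void) scratch;

      /* Still treated as uninitialized by the compiler, so
         -Wuninitialized can still diagnose this read; the option only
         changes what the memory contains at run time.  */
      return a.i;
    }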