From ea69031c5facc70e4a96df83cd58702900fd54b6 Mon Sep 17 00:00:00 2001
From: Jakub Jelinek
Date: Fri, 10 Jan 2020 22:18:22 +0100
Subject: re PR tree-optimization/93210 (Sub-optimal code optimization on
 struct/combound constexpr (gcc vs. clang))

        PR tree-optimization/93210
        * fold-const.h (native_encode_initializer,
        can_native_interpret_type_p): Declare.
        * fold-const.c (native_encode_string): Fix up handling with
        off != -1, simplify.
        (native_encode_initializer): New function, moved from dwarf2out.c.
        Adjust to native_encode_expr compatible arguments, including dry-run
        and partial extraction modes.  Don't handle STRING_CST.
        (can_native_interpret_type_p): No longer static.
        * gimple-fold.c (fold_ctor_reference): For native_encode_expr,
        verify offset / BITS_PER_UNIT fits into int and don't call it if
        can_native_interpret_type_p fails.  If suboff is NULL and for
        CONSTRUCTOR fold_{,non}array_ctor_reference returns NULL, retry
        with native_encode_initializer.
        (fold_const_aggregate_ref_1): Formatting fix.
        * dwarf2out.c (native_encode_initializer): Moved to fold-const.c.
        (tree_add_const_value_attribute): Adjust caller.
        * gcc.dg/pr93210.c: New test.
        * g++.dg/opt/pr93210.C: New test.

From-SVN: r280141
---
 gcc/fold-const.c | 219 +++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 212 insertions(+), 7 deletions(-)

(limited to 'gcc/fold-const.c')

diff --git a/gcc/fold-const.c b/gcc/fold-const.c
index 37c3432..aefa916 100644
--- a/gcc/fold-const.c
+++ b/gcc/fold-const.c
@@ -7837,9 +7837,10 @@ native_encode_string (const_tree expr, unsigned char *ptr, int len, int off)
     return 0;
   if (off == -1)
     off = 0;
+  len = MIN (total_bytes - off, len);
   if (ptr == NULL)
     /* Dry run.  */;
-  else if (TREE_STRING_LENGTH (expr) - off < MIN (total_bytes, len))
+  else
     {
       int written = 0;
       if (off < TREE_STRING_LENGTH (expr))
@@ -7847,12 +7848,9 @@ native_encode_string (const_tree expr, unsigned char *ptr, int len, int off)
           written = MIN (len, TREE_STRING_LENGTH (expr) - off);
           memcpy (ptr, TREE_STRING_POINTER (expr) + off, written);
         }
-      memset (ptr + written, 0,
-              MIN (total_bytes - written, len - written));
+      memset (ptr + written, 0, len - written);
     }
-  else
-    memcpy (ptr, TREE_STRING_POINTER (expr) + off, MIN (total_bytes, len));
-  return MIN (total_bytes - off, len);
+  return len;
 }
 
@@ -7895,6 +7893,213 @@ native_encode_expr (const_tree expr, unsigned char *ptr, int len, int off)
     }
 }
 
+/* Similar to native_encode_expr, but also handle CONSTRUCTORs, VCEs,
+   NON_LVALUE_EXPRs and nops.  */
+
+int
+native_encode_initializer (tree init, unsigned char *ptr, int len,
+                           int off)
+{
+  /* We don't support starting at negative offset and -1 is special.  */
+  if (off < -1 || init == NULL_TREE)
+    return 0;
+
+  STRIP_NOPS (init);
+  switch (TREE_CODE (init))
+    {
+    case VIEW_CONVERT_EXPR:
+    case NON_LVALUE_EXPR:
+      return native_encode_initializer (TREE_OPERAND (init, 0), ptr, len, off);
+    default:
+      return native_encode_expr (init, ptr, len, off);
+    case CONSTRUCTOR:
+      tree type = TREE_TYPE (init);
+      HOST_WIDE_INT total_bytes = int_size_in_bytes (type);
+      if (total_bytes < 0)
+        return 0;
+      if ((off == -1 && total_bytes > len) || off >= total_bytes)
+        return 0;
+      int o = off == -1 ? 0 : off;
+      if (TREE_CODE (type) == ARRAY_TYPE)
+        {
+          HOST_WIDE_INT min_index;
+          unsigned HOST_WIDE_INT cnt;
+          HOST_WIDE_INT curpos = 0, fieldsize;
+          constructor_elt *ce;
+
+          if (TYPE_DOMAIN (type) == NULL_TREE
+              || !tree_fits_shwi_p (TYPE_MIN_VALUE (TYPE_DOMAIN (type))))
+            return 0;
+
+          fieldsize = int_size_in_bytes (TREE_TYPE (type));
+          if (fieldsize <= 0)
+            return 0;
+
+          min_index = tree_to_shwi (TYPE_MIN_VALUE (TYPE_DOMAIN (type)));
+          if (ptr != NULL)
+            memset (ptr, '\0', MIN (total_bytes - off, len));
+
+          FOR_EACH_VEC_SAFE_ELT (CONSTRUCTOR_ELTS (init), cnt, ce)
+            {
+              tree val = ce->value;
+              tree index = ce->index;
+              HOST_WIDE_INT pos = curpos, count = 0;
+              bool full = false;
+              if (index && TREE_CODE (index) == RANGE_EXPR)
+                {
+                  if (!tree_fits_shwi_p (TREE_OPERAND (index, 0))
+                      || !tree_fits_shwi_p (TREE_OPERAND (index, 1)))
+                    return 0;
+                  pos = (tree_to_shwi (TREE_OPERAND (index, 0)) - min_index)
+                        * fieldsize;
+                  count = (tree_to_shwi (TREE_OPERAND (index, 1))
+                           - tree_to_shwi (TREE_OPERAND (index, 0)));
+                }
+              else if (index)
+                {
+                  if (!tree_fits_shwi_p (index))
+                    return 0;
+                  pos = (tree_to_shwi (index) - min_index) * fieldsize;
+                }
+
+              curpos = pos;
+              if (val)
+                do
+                  {
+                    if (off == -1
+                        || (curpos >= off
+                            && (curpos + fieldsize
+                                <= (HOST_WIDE_INT) off + len)))
+                      {
+                        if (full)
+                          {
+                            if (ptr)
+                              memcpy (ptr + (curpos - o), ptr + (pos - o),
+                                      fieldsize);
+                          }
+                        else if (!native_encode_initializer (val,
+                                                             ptr
+                                                             ? ptr + curpos - o
+                                                             : NULL,
+                                                             fieldsize,
+                                                             off == -1 ? -1
+                                                                       : 0))
+                          return 0;
+                        else
+                          {
+                            full = true;
+                            pos = curpos;
+                          }
+                      }
+                    else if (curpos + fieldsize > off
+                             && curpos < (HOST_WIDE_INT) off + len)
+                      {
+                        /* Partial overlap.  */
+                        unsigned char *p = NULL;
+                        int no = 0;
+                        int l;
+                        if (curpos >= off)
+                          {
+                            if (ptr)
+                              p = ptr + curpos - off;
+                            l = MIN ((HOST_WIDE_INT) off + len - curpos,
+                                     fieldsize);
+                          }
+                        else
+                          {
+                            p = ptr;
+                            no = off - curpos;
+                            l = len;
+                          }
+                        if (!native_encode_initializer (val, p, l, no))
+                          return 0;
+                      }
+                    curpos += fieldsize;
+                  }
+                while (count-- != 0);
+            }
+          return MIN (total_bytes - off, len);
+        }
+      else if (TREE_CODE (type) == RECORD_TYPE
+               || TREE_CODE (type) == UNION_TYPE)
+        {
+          unsigned HOST_WIDE_INT cnt;
+          constructor_elt *ce;
+
+          if (ptr != NULL)
+            memset (ptr, '\0', MIN (total_bytes - off, len));
+          FOR_EACH_VEC_SAFE_ELT (CONSTRUCTOR_ELTS (init), cnt, ce)
+            {
+              tree field = ce->index;
+              tree val = ce->value;
+              HOST_WIDE_INT pos, fieldsize;
+
+              if (field == NULL_TREE)
+                return 0;
+
+              pos = int_byte_position (field);
+              if (off != -1 && (HOST_WIDE_INT) off + len <= pos)
+                continue;
+
+              if (TREE_CODE (TREE_TYPE (field)) == ARRAY_TYPE
+                  && TYPE_DOMAIN (TREE_TYPE (field))
+                  && ! TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (field))))
+                return 0;
+              if (DECL_SIZE_UNIT (field) == NULL_TREE
+                  || !tree_fits_shwi_p (DECL_SIZE_UNIT (field)))
+                return 0;
+              fieldsize = tree_to_shwi (DECL_SIZE_UNIT (field));
+              if (fieldsize == 0)
+                continue;
+
+              if (off != -1 && pos + fieldsize <= off)
+                continue;
+
+              if (DECL_BIT_FIELD (field))
+                return 0;
+
+              if (val == NULL_TREE)
+                continue;
+
+              if (off == -1
+                  || (pos >= off
+                      && (pos + fieldsize <= (HOST_WIDE_INT) off + len)))
+                {
+                  if (!native_encode_initializer (val, ptr ? ptr + pos - o
+                                                           : NULL,
+                                                  fieldsize,
+                                                  off == -1 ? -1 : 0))
+                    return 0;
+                }
+              else
+                {
+                  /* Partial overlap.  */
+                  unsigned char *p = NULL;
+                  int no = 0;
+                  int l;
+                  if (pos >= off)
+                    {
+                      if (ptr)
+                        p = ptr + pos - off;
+                      l = MIN ((HOST_WIDE_INT) off + len - pos,
+                               fieldsize);
+                    }
+                  else
+                    {
+                      p = ptr;
+                      no = off - pos;
+                      l = len;
+                    }
+                  if (!native_encode_initializer (val, p, l, no))
+                    return 0;
+                }
+            }
+          return MIN (total_bytes - off, len);
+        }
+      return 0;
+    }
+}
+
 /* Subroutine of native_interpret_expr.  Interpret the contents of
    the buffer PTR of length LEN as an INTEGER_CST of type TYPE.
@@ -8129,7 +8334,7 @@ native_interpret_expr (tree type, const unsigned char *ptr, int len)
 /* Returns true if we can interpret the contents of a native
    encoding as TYPE.  */
 
-static bool
+bool
 can_native_interpret_type_p (tree type)
 {
   switch (TREE_CODE (type))
--
cgit v1.1
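
A note on the off/len contract used throughout native_encode_initializer
above: a freestanding C sketch, not GCC code (encode_field and the raw
byte array standing in for a field's encoded contents are invented for
illustration), mimicking the partial-overlap clamping done in both
CONSTRUCTOR cases.  The patch itself passes l = len in the pos < off case
and lets the recursive call clamp; the sketch clamps inline because memcpy
does not.

#include <stdio.h>
#include <string.h>

/* Copy the bytes of a field occupying [pos, pos + fieldsize) of some
   object into a window of LEN bytes starting at byte OFF of that
   object.  The p/no/l computation mirrors the patch.  */
static void
encode_field (const unsigned char *field, long pos, long fieldsize,
              unsigned char *window, long off, long len)
{
  if (pos + fieldsize <= off || pos >= off + len)
    return;                     /* No overlap with the window.  */

  unsigned char *p;             /* Destination inside the window.  */
  long no;                      /* Starting offset inside the field.  */
  long l;                       /* Number of bytes to copy.  */
  if (pos >= off)
    {
      p = window + (pos - off);
      no = 0;
      l = off + len - pos < fieldsize ? off + len - pos : fieldsize;
    }
  else
    {
      p = window;
      no = off - pos;
      l = fieldsize - no < len ? fieldsize - no : len;
    }
  memcpy (p, field + no, l);
}

int
main (void)
{
  unsigned char field[4] = { 0xde, 0xad, 0xbe, 0xef };
  unsigned char window[4] = { 0, 0, 0, 0 };
  /* Field at object bytes [2, 6), window covering bytes [4, 8):
     only the field's last two bytes land in the window.  */
  encode_field (field, 2, 4, window, 4, 4);
  for (int i = 0; i < 4; i++)
    printf ("%02x ", window[i]);        /* prints: be ef 00 00 */
  printf ("\n");
  return 0;
}
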
From 8aba425f4ebc5e2c054776d3cdddf13f7c1918f8 Mon Sep 17 00:00:00 2001
From: Jakub Jelinek
Date: Thu, 13 Feb 2020 10:04:11 +0100
Subject: sccvn: Handle bitfields in vn_reference_lookup_3 [PR93582]

The following patch is a first step towards fixing PR93582.
vn_reference_lookup_3 right now punts on anything that isn't byte aligned,
so to be able to look up a constant bitfield store, one needs to use the
exact same COMPONENT_REF, otherwise it isn't found.

This patch lifts that restriction if the bits to be loaded are covered by
a single store of a constant (keeps the restriction so far for the
multiple store case, can tweak that incrementally, but I think for
bisection etc. it is worth doing it one step at a time).

2020-02-13  Jakub Jelinek

        PR tree-optimization/93582
        * fold-const.h (shift_bytes_in_array_left,
        shift_bytes_in_array_right): Declare.
        * fold-const.c (shift_bytes_in_array_left,
        shift_bytes_in_array_right): New functions, moved from
        gimple-ssa-store-merging.c, no longer static.
        * gimple-ssa-store-merging.c (shift_bytes_in_array): Move to
        fold-const.c and rename to shift_bytes_in_array_left.
        (shift_bytes_in_array_right): Move to fold-const.c.
        (encode_tree_to_bitpos): Use shift_bytes_in_array_left
        instead of shift_bytes_in_array.
        (verify_shift_bytes_in_array): Rename to ...
        (verify_shift_bytes_in_array_left): ... this.  Use
        shift_bytes_in_array_left instead of shift_bytes_in_array.
        (store_merging_c_tests): Call verify_shift_bytes_in_array_left
        instead of verify_shift_bytes_in_array.
        * tree-ssa-sccvn.c (vn_reference_lookup_3): For native_encode_expr
        / native_interpret_expr where the store covers all needed bits,
        punt on PDP-endian, otherwise allow all involved offsets and sizes
        not to be byte-aligned.
        * gcc.dg/tree-ssa/pr93582-1.c: New test.
        * gcc.dg/tree-ssa/pr93582-2.c: New test.
        * gcc.dg/tree-ssa/pr93582-3.c: New test.
---
 gcc/fold-const.c | 64 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 64 insertions(+)

(limited to 'gcc/fold-const.c')

diff --git a/gcc/fold-const.c b/gcc/fold-const.c
index aefa916..71a1d3e 100644
--- a/gcc/fold-const.c
+++ b/gcc/fold-const.c
@@ -8354,6 +8354,70 @@ can_native_interpret_type_p (tree type)
     }
 }
 
+/* Routines for manipulation of native_encode_expr encoded data if the encoded
+   or extracted constant positions and/or sizes aren't byte aligned.  */
+
+/* Shift left the bytes in PTR of SZ elements by AMNT bits, carrying over the
+   bits between adjacent elements.  AMNT should be within
+   [0, BITS_PER_UNIT).
+   Example, AMNT = 2:
+   00011111|11100000 << 2 = 01111111|10000000
+   PTR[1]  | PTR[0]         PTR[1]  | PTR[0].  */
+
+void
+shift_bytes_in_array_left (unsigned char *ptr, unsigned int sz,
+                           unsigned int amnt)
+{
+  if (amnt == 0)
+    return;
+
+  unsigned char carry_over = 0U;
+  unsigned char carry_mask = (~0U) << (unsigned char) (BITS_PER_UNIT - amnt);
+  unsigned char clear_mask = (~0U) << amnt;
+
+  for (unsigned int i = 0; i < sz; i++)
+    {
+      unsigned prev_carry_over = carry_over;
+      carry_over = (ptr[i] & carry_mask) >> (BITS_PER_UNIT - amnt);
+
+      ptr[i] <<= amnt;
+      if (i != 0)
+        {
+          ptr[i] &= clear_mask;
+          ptr[i] |= prev_carry_over;
+        }
+    }
+}
+
+/* Like shift_bytes_in_array_left but for big-endian.
+   Shift right the bytes in PTR of SZ elements by AMNT bits, carrying over the
+   bits between adjacent elements.  AMNT should be within
+   [0, BITS_PER_UNIT).
+   Example, AMNT = 2:
+   00011111|11100000 >> 2 = 00000111|11111000
+   PTR[0]  | PTR[1]         PTR[0]  | PTR[1].  */
+
+void
+shift_bytes_in_array_right (unsigned char *ptr, unsigned int sz,
+                            unsigned int amnt)
+{
+  if (amnt == 0)
+    return;
+
+  unsigned char carry_over = 0U;
+  unsigned char carry_mask = ~(~0U << amnt);
+
+  for (unsigned int i = 0; i < sz; i++)
+    {
+      unsigned prev_carry_over = carry_over;
+      carry_over = ptr[i] & carry_mask;
+
+      carry_over <<= (unsigned char) BITS_PER_UNIT - amnt;
+      ptr[i] >>= amnt;
+      ptr[i] |= prev_carry_over;
+    }
+}
+
 /* Try to view-convert VECTOR_CST EXPR to VECTOR_TYPE TYPE by operating
    directly on the VECTOR_CST encoding, in a way that works for variable-
    length vectors.  Return the resulting VECTOR_CST on success or null
--
cgit v1.1
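
The two shift routines above depend only on BITS_PER_UNIT, so they can be
exercised in isolation.  A freestanding demo, assuming 8-bit bytes, that
reproduces the example from the shift_bytes_in_array_left comment:

#include <stdio.h>

#define BITS_PER_UNIT 8

/* Same algorithm as the patch, with BITS_PER_UNIT pinned to 8.  */
static void
shift_bytes_in_array_left (unsigned char *ptr, unsigned int sz,
                           unsigned int amnt)
{
  if (amnt == 0)
    return;

  unsigned char carry_over = 0U;
  unsigned char carry_mask = (~0U) << (unsigned char) (BITS_PER_UNIT - amnt);
  unsigned char clear_mask = (~0U) << amnt;

  for (unsigned int i = 0; i < sz; i++)
    {
      unsigned prev_carry_over = carry_over;
      /* The top AMNT bits of this byte spill into the next byte.  */
      carry_over = (ptr[i] & carry_mask) >> (BITS_PER_UNIT - amnt);

      ptr[i] <<= amnt;
      if (i != 0)
        {
          ptr[i] &= clear_mask;
          ptr[i] |= prev_carry_over;
        }
    }
}

int
main (void)
{
  /* PTR[1]|PTR[0] is 00011111|11100000, as in the comment.  */
  unsigned char buf[2] = { 0xe0, 0x1f };
  shift_bytes_in_array_left (buf, 2, 2);
  /* Now PTR[1]|PTR[0] is 01111111|10000000.  */
  printf ("%02x %02x\n", buf[0], buf[1]);       /* prints: 80 7f */
  return 0;
}

shift_bytes_in_array_right follows the same pattern with the carry
travelling in the opposite direction, matching big-endian byte order.
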
From 4e3d3e40726e1b68bf52fa205c68495124ea60b8 Mon Sep 17 00:00:00 2001
From: Richard Biener
Date: Wed, 18 Mar 2020 09:13:17 +0100
Subject: middle-end/94188 fix fold of addr expression generation

This adds a missing type conversion to build_fold_addr_expr and adjusts
fallout - build_fold_addr_expr was used as a convenience to build an
ADDR_EXPR but some callers do not expect the result to be simplified
to something else.

2020-03-18  Richard Biener

        PR middle-end/94188
        * fold-const.c (build_fold_addr_expr): Convert address to
        correct type.
        * asan.c (maybe_create_ssa_name): Strip useless type conversions.
        * gimple-fold.c (gimple_fold_stmt_to_constant_1): Use build1
        to build the ADDR_EXPR which we don't really want to simplify.
        * tree-ssa-dom.c (record_equivalences_from_stmt): Likewise.
        * tree-ssa-loop-im.c (gather_mem_refs_stmt): Likewise.
        * tree-ssa-forwprop.c (forward_propagate_addr_expr_1): Likewise.
        (simplify_builtin_call): Strip useless type conversions.
        * tree-ssa-strlen.c (new_strinfo): Likewise.
        * gcc.dg/pr94188.c: New testcase.
---
 gcc/fold-const.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

(limited to 'gcc/fold-const.c')

diff --git a/gcc/fold-const.c b/gcc/fold-const.c
index 71a1d3e..3ab1a9a 100644
--- a/gcc/fold-const.c
+++ b/gcc/fold-const.c
@@ -8523,7 +8523,12 @@ build_fold_addr_expr_with_type_loc (location_t loc, tree t, tree ptrtype)
     }
   else if (TREE_CODE (t) == MEM_REF
           && integer_zerop (TREE_OPERAND (t, 1)))
-    return TREE_OPERAND (t, 0);
+    {
+      t = TREE_OPERAND (t, 0);
+
+      if (TREE_TYPE (t) != ptrtype)
+        t = fold_convert_loc (loc, ptrtype, t);
+    }
   else if (TREE_CODE (t) == MEM_REF
           && TREE_CODE (TREE_OPERAND (t, 0)) == INTEGER_CST)
     return fold_binary (POINTER_PLUS_EXPR, ptrtype,
--
cgit v1.1
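
A toy model of the bug class fixed here, using invented string-tagged
values rather than GCC's tree IR: any helper that simplifies &*base down
to base must convert the result to the requested pointer type, because
the base pointer may have a different type.  The strcmp check below plays
the role of the added fold_convert_loc call.

#include <assert.h>
#include <string.h>

/* A value paired with the name of its static type.  */
typedef struct { const char *type; const char *name; } expr;

/* Analogue of the fixed build_fold_addr_expr: simplifying the address
   of a zero-offset dereference of BASE yields BASE itself, converted
   to PTRTYPE instead of keeping whatever type BASE happens to have.  */
static expr
build_addr (expr base, const char *ptrtype)
{
  expr res = base;              /* &*base simplifies to base ...  */
  if (strcmp (res.type, ptrtype) != 0)
    res.type = ptrtype;         /* ... wrapped in a conversion.  */
  return res;
}

int
main (void)
{
  expr p = { "short *", "p" };
  expr a = build_addr (p, "int *");     /* models &*(int *)p */
  assert (strcmp (a.type, "int *") == 0);
  return 0;
}
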
From 73bc09fa8c6b973a928a599498caa66a25c8bc8d Mon Sep 17 00:00:00 2001
From: Richard Biener
Date: Thu, 19 Mar 2020 10:15:52 +0100
Subject: middle-end/94216 fix another build_fold_addr_expr use

2020-03-19  Richard Biener

        PR middle-end/94216
        * fold-const.c (fold_binary_loc): Avoid using
        build_fold_addr_expr when we really want an ADDR_EXPR.
        * g++.dg/torture/pr94216.C: New testcase.
---
 gcc/fold-const.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'gcc/fold-const.c')

diff --git a/gcc/fold-const.c b/gcc/fold-const.c
index 3ab1a9a..9267914 100644
--- a/gcc/fold-const.c
+++ b/gcc/fold-const.c
@@ -10284,7 +10284,7 @@ fold_binary_loc (location_t loc, enum tree_code code, tree type,
           if (!base)
             return NULL_TREE;
           return fold_build2 (MEM_REF, type,
-                              build_fold_addr_expr (base),
+                              build1 (ADDR_EXPR, TREE_TYPE (arg0), base),
                               int_const_binop (PLUS_EXPR, arg1,
                                                size_int (coffset)));
         }
--
cgit v1.1

From 1dcffc8ddc48f0b45d3d0d2f763ef5870560eb9a Mon Sep 17 00:00:00 2001
From: Jakub Jelinek
Date: Tue, 31 Mar 2020 11:06:43 +0200
Subject: fold-const: Fix division folding with vector operands [PR94412]

The following testcase is miscompiled since 4.9: we treat unsigned
vector types as if they were signed and "optimize" negations across
the division.

2020-03-31  Marc Glisse
            Jakub Jelinek

        PR middle-end/94412
        * fold-const.c (fold_binary_loc): Use ANY_INTEGRAL_TYPE_P
        instead of INTEGRAL_TYPE_P.
        * gcc.c-torture/execute/pr94412.c: New test.

Co-authored-by: Marc Glisse
---
 gcc/fold-const.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'gcc/fold-const.c')

diff --git a/gcc/fold-const.c b/gcc/fold-const.c
index 9267914..b79d059 100644
--- a/gcc/fold-const.c
+++ b/gcc/fold-const.c
@@ -11148,11 +11148,11 @@ fold_binary_loc (location_t loc, enum tree_code code, tree type,
       /* Convert -A / -B to A / B when the type is signed and overflow is
          undefined.  */
-      if ((!INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_UNDEFINED (type))
+      if ((!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_UNDEFINED (type))
          && TREE_CODE (op0) == NEGATE_EXPR
          && negate_expr_p (op1))
        {
-         if (INTEGRAL_TYPE_P (type))
+         if (ANY_INTEGRAL_TYPE_P (type))
            fold_overflow_warning (("assuming signed overflow does not occur "
                                    "when distributing negation across "
                                    "division"),
@@ -11162,11 +11162,11 @@ fold_binary_loc (location_t loc, enum tree_code code, tree type,
                                          TREE_OPERAND (arg0, 0)),
                              negate_expr (op1));
        }
-      if ((!INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_UNDEFINED (type))
+      if ((!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_UNDEFINED (type))
          && TREE_CODE (arg1) == NEGATE_EXPR
          && negate_expr_p (op0))
        {
-         if (INTEGRAL_TYPE_P (type))
+         if (ANY_INTEGRAL_TYPE_P (type))
            fold_overflow_warning (("assuming signed overflow does not occur "
                                    "when distributing negation across "
                                    "division"),
--
cgit v1.1
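
To see why testing only INTEGRAL_TYPE_P was wrong, recall that unsigned
negation wraps, so (-a) / (-b) is generally not a / b.  The snippet below
is illustrative (GNU C vector extension; not copied from pr94412.c) and
checks both the scalar identity failure and the unsigned-vector case the
PR is about.

#include <assert.h>

typedef unsigned int v4su __attribute__ ((vector_size (16)));

int
main (void)
{
  /* Scalar: with a == 1, b == 3, (-a) / (-b) is
     4294967295 / 4294967293 == 1, while a / b == 0.  */
  unsigned int a = 1, b = 3;
  assert ((0u - a) / (0u - b) != a / b);

  /* Vector with unsigned elements: same wraparound, element-wise,
     so folding (-va) / (-vb) to va / vb would miscompile this.  */
  v4su va = { 1, 1, 1, 1 }, vb = { 3, 3, 3, 3 };
  v4su vc = (-va) / (-vb);
  assert (vc[0] == 1 && vc[3] == 1);
  return 0;
}
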
From 73a8043481d24ac86ce8d19459276181dfd9c858 Mon Sep 17 00:00:00 2001
From: Jakub Jelinek
Date: Mon, 4 May 2020 10:57:46 +0200
Subject: match.pd: Move (X & C) eqne (Y & C) -> (X ^ Y) & C eqne 0 opt to
 match.pd [PR94718]

This patch moves this optimization from fold-const.c to match.pd, where
it is actually much shorter to do, and lets us optimize even code not
seen together in a single expression in the source, as the first step
towards fixing the PR.

2020-05-04  Jakub Jelinek

        PR tree-optimization/94718
        * fold-const.c (fold_binary_loc): Move (X & C) eqne (Y & C)
        -> (X ^ Y) & C eqne 0 optimization to ...
        * match.pd ((X & C) op (Y & C) into (X ^ Y) & C op 0): ... here.
        * gcc.dg/tree-ssa/pr94718-1.c: New test.
        * gcc.dg/tree-ssa/pr94718-2.c: New test.
---
 gcc/fold-const.c | 44 --------------------------------------------
 1 file changed, 44 deletions(-)

(limited to 'gcc/fold-const.c')

diff --git a/gcc/fold-const.c b/gcc/fold-const.c
index b79d059..f054871 100644
--- a/gcc/fold-const.c
+++ b/gcc/fold-const.c
@@ -11631,50 +11631,6 @@ fold_binary_loc (location_t loc, enum tree_code code, tree type,
           return omit_one_operand_loc (loc, type, res, arg0);
         }
 
-      /* Fold (X & C) op (Y & C) as (X ^ Y) & C op 0", and symmetries.  */
-      if (TREE_CODE (arg0) == BIT_AND_EXPR
-          && TREE_CODE (arg1) == BIT_AND_EXPR)
-        {
-          tree arg00 = TREE_OPERAND (arg0, 0);
-          tree arg01 = TREE_OPERAND (arg0, 1);
-          tree arg10 = TREE_OPERAND (arg1, 0);
-          tree arg11 = TREE_OPERAND (arg1, 1);
-          tree itype = TREE_TYPE (arg0);
-
-          if (operand_equal_p (arg01, arg11, 0))
-            {
-              tem = fold_convert_loc (loc, itype, arg10);
-              tem = fold_build2_loc (loc, BIT_XOR_EXPR, itype, arg00, tem);
-              tem = fold_build2_loc (loc, BIT_AND_EXPR, itype, tem, arg01);
-              return fold_build2_loc (loc, code, type, tem,
-                                      build_zero_cst (itype));
-            }
-          if (operand_equal_p (arg01, arg10, 0))
-            {
-              tem = fold_convert_loc (loc, itype, arg11);
-              tem = fold_build2_loc (loc, BIT_XOR_EXPR, itype, arg00, tem);
-              tem = fold_build2_loc (loc, BIT_AND_EXPR, itype, tem, arg01);
-              return fold_build2_loc (loc, code, type, tem,
-                                      build_zero_cst (itype));
-            }
-          if (operand_equal_p (arg00, arg11, 0))
-            {
-              tem = fold_convert_loc (loc, itype, arg10);
-              tem = fold_build2_loc (loc, BIT_XOR_EXPR, itype, arg01, tem);
-              tem = fold_build2_loc (loc, BIT_AND_EXPR, itype, tem, arg00);
-              return fold_build2_loc (loc, code, type, tem,
-                                      build_zero_cst (itype));
-            }
-          if (operand_equal_p (arg00, arg10, 0))
-            {
-              tem = fold_convert_loc (loc, itype, arg11);
-              tem = fold_build2_loc (loc, BIT_XOR_EXPR, itype, arg01, tem);
-              tem = fold_build2_loc (loc, BIT_AND_EXPR, itype, tem, arg00);
-              return fold_build2_loc (loc, code, type, tem,
-                                      build_zero_cst (itype));
-            }
-        }
-
   if (TREE_CODE (arg0) == BIT_XOR_EXPR
       && TREE_CODE (arg1) == BIT_XOR_EXPR)
     {
--
cgit v1.1
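
The transformation being moved relies on the identity
((x & c) == (y & c)) <=> (((x ^ y) & c) == 0): x and y agree on every bit
selected by c exactly when x ^ y has no set bit in common with c.  A
brute-force 8-bit verification of the claim:

#include <assert.h>

int
main (void)
{
  for (unsigned int x = 0; x < 256; x++)
    for (unsigned int y = 0; y < 256; y++)
      for (unsigned int c = 0; c < 256; c++)
        assert (((x & c) == (y & c)) == (((x ^ y) & c) == 0));
  return 0;
}

Doing this in match.pd rather than fold-const.c means the rewrite also
fires when the two BIT_AND_EXPRs come from separate statements, not just
when they appear in one source expression.
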
From fe7ebef7fe4f9acb79658ed9db0749b07efc3105 Mon Sep 17 00:00:00 2001
From: Eric Botcazou
Date: Thu, 28 May 2020 00:31:15 +0200
Subject: Add support for __builtin_bswap128

This patch introduces a new builtin named __builtin_bswap128 on targets
where TImode is supported, i.e. 64-bit targets only in practice.  The
implementation simply reuses the existing double word path in optab, so
no routine is added to libgcc (which means that you get two calls to
_bswapdi2 in the worst case).

gcc/ChangeLog:

        * builtin-types.def (BT_UINT128): New primitive type.
        (BT_FN_UINT128_UINT128): New function type.
        * builtins.def (BUILT_IN_BSWAP128): New GCC builtin.
        * doc/extend.texi (__builtin_bswap128): Document it.
        * builtins.c (expand_builtin): Deal with BUILT_IN_BSWAP128.
        (is_inexpensive_builtin): Likewise.
        * fold-const-call.c (fold_const_call_ss): Likewise.
        * fold-const.c (tree_call_nonnegative_warnv_p): Likewise.
        * tree-ssa-ccp.c (evaluate_stmt): Likewise.
        * tree-vect-stmts.c (vect_get_data_ptr_increment): Likewise.
        (vectorizable_call): Likewise.
        * optabs.c (expand_unop): Always use the double word path for it.
        * tree-core.h (enum tree_index): Add TI_UINT128_TYPE.
        * tree.h (uint128_type_node): New global type.
        * tree.c (build_common_tree_nodes): Build it if TImode is supported.

gcc/testsuite/ChangeLog:

        * gcc.dg/builtin-bswap-10.c: New test.
        * gcc.dg/builtin-bswap-11.c: Likewise.
        * gcc.dg/builtin-bswap-12.c: Likewise.
        * gcc.target/i386/builtin-bswap-5.c: Likewise.
---
 gcc/fold-const.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'gcc/fold-const.c')

diff --git a/gcc/fold-const.c b/gcc/fold-const.c
index f054871..212d0ba 100644
--- a/gcc/fold-const.c
+++ b/gcc/fold-const.c
@@ -13794,8 +13794,10 @@ tree_call_nonnegative_warnv_p (tree type, combined_fn fn, tree arg0, tree arg1,
     CASE_CFN_POPCOUNT:
     CASE_CFN_CLZ:
     CASE_CFN_CLRSB:
+    case CFN_BUILT_IN_BSWAP16:
     case CFN_BUILT_IN_BSWAP32:
     case CFN_BUILT_IN_BSWAP64:
+    case CFN_BUILT_IN_BSWAP128:
       /* Always true.  */
       return true;
--
cgit v1.1
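
A usage sketch for the new builtin, assuming a 64-bit target where
__int128 exists and a compiler recent enough to provide
__builtin_bswap128.  The assert checks the decomposition hinted at above:
byte-reversing a 128-bit value equals swapping its 64-bit halves and
byte-reversing each half (the two _bswapdi2 calls in the worst case).

#include <assert.h>

typedef unsigned __int128 u128;

int
main (void)
{
  u128 x = ((u128) 0x0011223344556677ULL << 64) | 0x8899aabbccddeeffULL;
  u128 y = __builtin_bswap128 (x);

  /* Reversing all 16 bytes == swapping the 64-bit halves and
     byte-reversing each half.  */
  u128 z = ((u128) __builtin_bswap64 (0x8899aabbccddeeffULL) << 64)
           | __builtin_bswap64 (0x0011223344556677ULL);
  assert (y == z);
  return 0;
}
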