author | Andrew Pinski <apinski@marvell.com> | 2023-05-19 18:52:45 +0000 |
---|---|---|
committer | Andrew Pinski <apinski@marvell.com> | 2023-05-20 05:04:49 +0000 |
commit | 5c68c27f3d093da89cdeee2706a03b8e759f111b (patch) | |
tree | d027ef13c83cf616bd32969bccb72314c69a18db /gcc/expr.cc | |
parent | 9d2fdcab162365caa23e5ade92d1404737e9b600 (diff) | |
Simplify fold_single_bit_test with respect to code
Since fold_single_bit_test is now only ever passed NE_EXPR or EQ_EXPR,
we can simplify it: drop the enclosing guard and use a gcc_assert to
check that one of those two codes is what was passed in.
gcc/ChangeLog:
* expr.cc (fold_single_bit_test): Add an assert
and simplify based on code being NE_EXPR or EQ_EXPR.
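The change itself is the familiar guard-to-assert cleanup. As a rough sketch of the before/after shape (a minimal standalone C illustration with placeholder names, not the actual GCC code, which follows in the diff below):

#include <assert.h>
#include <stddef.h>

enum tree_code { NE_EXPR, EQ_EXPR, OTHER_EXPR };  /* placeholder enum */

/* Before: the whole body sat inside a guard, and any other code fell
   through to a "no match" return (NULL_TREE in the real function).  */
static const char *
fold_guarded (enum tree_code code)
{
  if (code == NE_EXPR || code == EQ_EXPR)
    {
      /* ... the actual folding work ...  */
      return "folded";
    }
  return NULL;
}

/* After: every caller already guarantees the precondition, so assert it
   and drop one level of nesting from the body.  */
static const char *
fold_asserted (enum tree_code code)
{
  assert (code == NE_EXPR || code == EQ_EXPR);
  /* ... the same folding work, now unindented ...  */
  return "folded";
}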
Diffstat (limited to 'gcc/expr.cc')
-rw-r--r-- | gcc/expr.cc | 108 |
1 file changed, 53 insertions, 55 deletions
diff --git a/gcc/expr.cc b/gcc/expr.cc
index 67a9f82..b5bc3fa 100644
--- a/gcc/expr.cc
+++ b/gcc/expr.cc
@@ -12909,72 +12909,70 @@
 fold_single_bit_test (location_t loc, enum tree_code code,
                       tree inner, int bitnum, tree result_type)
 {
-  if ((code == NE_EXPR || code == EQ_EXPR))
-    {
-      tree type = TREE_TYPE (inner);
-      scalar_int_mode operand_mode = SCALAR_INT_TYPE_MODE (type);
-      int ops_unsigned;
-      tree signed_type, unsigned_type, intermediate_type;
-      tree one;
-      gimple *inner_def;
+  gcc_assert (code == NE_EXPR || code == EQ_EXPR);
 
-      /* First, see if we can fold the single bit test into a sign-bit
-         test.  */
-      if (bitnum == TYPE_PRECISION (type) - 1
-          && type_has_mode_precision_p (type))
-        {
-          tree stype = signed_type_for (type);
-          return fold_build2_loc (loc, code == EQ_EXPR ? GE_EXPR : LT_EXPR,
-                                  result_type,
-                                  fold_convert_loc (loc, stype, inner),
-                                  build_int_cst (stype, 0));
-        }
+  tree type = TREE_TYPE (inner);
+  scalar_int_mode operand_mode = SCALAR_INT_TYPE_MODE (type);
+  int ops_unsigned;
+  tree signed_type, unsigned_type, intermediate_type;
+  tree one;
+  gimple *inner_def;
 
-      /* Otherwise we have (A & C) != 0 where C is a single bit,
-         convert that into ((A >> C2) & 1).  Where C2 = log2(C).
-         Similarly for (A & C) == 0.  */
+  /* First, see if we can fold the single bit test into a sign-bit
+     test.  */
+  if (bitnum == TYPE_PRECISION (type) - 1
+      && type_has_mode_precision_p (type))
+    {
+      tree stype = signed_type_for (type);
+      return fold_build2_loc (loc, code == EQ_EXPR ? GE_EXPR : LT_EXPR,
+                              result_type,
+                              fold_convert_loc (loc, stype, inner),
+                              build_int_cst (stype, 0));
+    }
 
-      /* If INNER is a right shift of a constant and it plus BITNUM does
-         not overflow, adjust BITNUM and INNER.  */
-      if ((inner_def = get_def_for_expr (inner, RSHIFT_EXPR))
-          && TREE_CODE (gimple_assign_rhs2 (inner_def)) == INTEGER_CST
-          && bitnum < TYPE_PRECISION (type)
-          && wi::ltu_p (wi::to_wide (gimple_assign_rhs2 (inner_def)),
-                        TYPE_PRECISION (type) - bitnum))
-        {
-          bitnum += tree_to_uhwi (gimple_assign_rhs2 (inner_def));
-          inner = gimple_assign_rhs1 (inner_def);
-        }
+  /* Otherwise we have (A & C) != 0 where C is a single bit,
+     convert that into ((A >> C2) & 1).  Where C2 = log2(C).
+     Similarly for (A & C) == 0.  */
 
-      /* If we are going to be able to omit the AND below, we must do our
-         operations as unsigned.  If we must use the AND, we have a choice.
-         Normally unsigned is faster, but for some machines signed is.  */
-      ops_unsigned = (load_extend_op (operand_mode) == SIGN_EXTEND
-                      && !flag_syntax_only) ? 0 : 1;
+  /* If INNER is a right shift of a constant and it plus BITNUM does
+     not overflow, adjust BITNUM and INNER.  */
+  if ((inner_def = get_def_for_expr (inner, RSHIFT_EXPR))
+      && TREE_CODE (gimple_assign_rhs2 (inner_def)) == INTEGER_CST
+      && bitnum < TYPE_PRECISION (type)
+      && wi::ltu_p (wi::to_wide (gimple_assign_rhs2 (inner_def)),
+                    TYPE_PRECISION (type) - bitnum))
+    {
+      bitnum += tree_to_uhwi (gimple_assign_rhs2 (inner_def));
+      inner = gimple_assign_rhs1 (inner_def);
+    }
 
-      signed_type = lang_hooks.types.type_for_mode (operand_mode, 0);
-      unsigned_type = lang_hooks.types.type_for_mode (operand_mode, 1);
-      intermediate_type = ops_unsigned ? unsigned_type : signed_type;
-      inner = fold_convert_loc (loc, intermediate_type, inner);
+  /* If we are going to be able to omit the AND below, we must do our
+     operations as unsigned.  If we must use the AND, we have a choice.
+     Normally unsigned is faster, but for some machines signed is.  */
+  ops_unsigned = (load_extend_op (operand_mode) == SIGN_EXTEND
+                  && !flag_syntax_only) ? 0 : 1;
 
-      if (bitnum != 0)
-        inner = build2 (RSHIFT_EXPR, intermediate_type,
-                        inner, size_int (bitnum));
+  signed_type = lang_hooks.types.type_for_mode (operand_mode, 0);
+  unsigned_type = lang_hooks.types.type_for_mode (operand_mode, 1);
+  intermediate_type = ops_unsigned ? unsigned_type : signed_type;
+  inner = fold_convert_loc (loc, intermediate_type, inner);
 
-      one = build_int_cst (intermediate_type, 1);
+  if (bitnum != 0)
+    inner = build2 (RSHIFT_EXPR, intermediate_type,
+                    inner, size_int (bitnum));
 
-      if (code == EQ_EXPR)
-        inner = fold_build2_loc (loc, BIT_XOR_EXPR, intermediate_type, inner, one);
+  one = build_int_cst (intermediate_type, 1);
 
-      /* Put the AND last so it can combine with more things.  */
-      inner = build2 (BIT_AND_EXPR, intermediate_type, inner, one);
+  if (code == EQ_EXPR)
+    inner = fold_build2_loc (loc, BIT_XOR_EXPR, intermediate_type, inner, one);
 
-      /* Make sure to return the proper type.  */
-      inner = fold_convert_loc (loc, result_type, inner);
+  /* Put the AND last so it can combine with more things.  */
+  inner = build2 (BIT_AND_EXPR, intermediate_type, inner, one);
 
-      return inner;
-    }
-  return NULL_TREE;
+  /* Make sure to return the proper type.  */
+  inner = fold_convert_loc (loc, result_type, inner);
+
+  return inner;
 }
 
 /* Generate code to calculate OPS, and exploded expression
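For readers less familiar with this folding, the comments in the hunk describe two rewrites. The sketch below shows their source-level effect in plain, self-contained C (illustrative bit positions and helper names only; GCC performs the equivalent rewrite on trees/GIMPLE, not on C source):

#include <stdio.h>

/* Single-bit test for bit 3, i.e. C = 8 and C2 = log2 (8) = 3:
   (a & 8) != 0  becomes  (a >> 3) & 1.  */
static unsigned bit_test_ne (unsigned a) { return (a >> 3) & 1; }

/* (a & 8) == 0  becomes  ((a >> 3) ^ 1) & 1; the XOR by one flips the
   extracted bit, which is why the EQ_EXPR path adds a BIT_XOR_EXPR.  */
static unsigned bit_test_eq (unsigned a) { return ((a >> 3) ^ 1) & 1; }

/* Sign-bit special case (bitnum == TYPE_PRECISION (type) - 1): the test
   is folded to a signed comparison against zero instead of a shift.  */
static int sign_test (int a) { return a < 0; }

int
main (void)
{
  for (unsigned a = 0; a < 32; a++)
    if (bit_test_ne (a) != ((a & 8) != 0)
        || bit_test_eq (a) != ((a & 8) == 0))
      printf ("mismatch at %u\n", a);
  printf ("sign_test (-5) = %d, sign_test (5) = %d\n",
          sign_test (-5), sign_test (5));
  return 0;
}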