Diffstat (limited to 'gcc/fold-const.c')
-rw-r--r--  gcc/fold-const.c  | 100
1 file changed, 0 insertions, 100 deletions
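Note: the deleted block implemented the shift-and-mask folds described in its leading comment: bit positions that the shift is known to zero out can be ORed into the AND mask, and when the mask keeps only such known-zero bits the whole expression folds to 0. The following stand-alone sketch is illustrative only and is not part of this commit; the constants C1 and C2 used below are arbitrary example choices.

/* Illustrative only, not part of this commit: checks the identities
   the removed fold_binary_loc code relied on.  */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  for (uint32_t x = 0; x < (1u << 20); x++)
    {
      /* (X << C1) & C2 == (X << C1) & (C2 | ((1 << C1) - 1)): the low
         C1 bits of X << C1 are always zero, so ORing them into the mask
         cannot change the result.  With C1 = 4 and C2 = 0xfff0 the
         widened mask is 0xffff, an integer mode's mask, which is what
         made the transformation worthwhile.  */
      assert (((x << 4) & 0xfff0u) == ((x << 4) & 0xffffu));

      /* (X >> C1) & C2 == (X >> C1) & (C2 | ~((type) -1 >> C1)) for
         unsigned X: the top C1 bits of X >> C1 are always zero.
         Here C1 = 8, so 0x00ffff00 widens to 0xffffff00.  */
      assert (((x >> 8) & 0x00ffff00u) == ((x >> 8) & 0xffffff00u));

      /* ((X << 16) & 0xff00) is (X, 0): every bit the mask keeps is a
         shifted-in zero, so the expression folds to the constant 0.  */
      assert (((x << 16) & 0xff00u) == 0);
    }
  puts ("identities hold");
  return 0;
}

Compiled with any C99 compiler and run, the loop exhaustively verifies the three identities over the low 20 bits of x.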
diff --git a/gcc/fold-const.c b/gcc/fold-const.c
index 60aa210..6fa784a 100644
--- a/gcc/fold-const.c
+++ b/gcc/fold-const.c
@@ -11516,106 +11516,6 @@ fold_binary_loc (location_t loc,
             return build_int_cst (type, residue & low);
         }
-      /* Fold (X << C1) & C2 into (X << C1) & (C2 | ((1 << C1) - 1))
-         (X >> C1) & C2 into (X >> C1) & (C2 | ~((type) -1 >> C1))
-         if the new mask might be further optimized.  */
-      if ((TREE_CODE (arg0) == LSHIFT_EXPR
-           || TREE_CODE (arg0) == RSHIFT_EXPR)
-          && TYPE_PRECISION (TREE_TYPE (arg0)) <= HOST_BITS_PER_WIDE_INT
-          && TREE_CODE (arg1) == INTEGER_CST
-          && tree_fits_uhwi_p (TREE_OPERAND (arg0, 1))
-          && tree_to_uhwi (TREE_OPERAND (arg0, 1)) > 0
-          && (tree_to_uhwi (TREE_OPERAND (arg0, 1))
-              < TYPE_PRECISION (TREE_TYPE (arg0))))
-        {
-          unsigned int shiftc = tree_to_uhwi (TREE_OPERAND (arg0, 1));
-          unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (arg1);
-          unsigned HOST_WIDE_INT newmask, zerobits = 0;
-          tree shift_type = TREE_TYPE (arg0);
-
-          if (TREE_CODE (arg0) == LSHIFT_EXPR)
-            zerobits = ((((unsigned HOST_WIDE_INT) 1) << shiftc) - 1);
-          else if (TREE_CODE (arg0) == RSHIFT_EXPR
-                   && TYPE_PRECISION (TREE_TYPE (arg0))
-                      == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (arg0))))
-            {
-              prec = TYPE_PRECISION (TREE_TYPE (arg0));
-              tree arg00 = TREE_OPERAND (arg0, 0);
-              /* See if more bits can be proven as zero because of
-                 zero extension.  */
-              if (TREE_CODE (arg00) == NOP_EXPR
-                  && TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg00, 0))))
-                {
-                  tree inner_type = TREE_TYPE (TREE_OPERAND (arg00, 0));
-                  if (TYPE_PRECISION (inner_type)
-                      == GET_MODE_PRECISION (TYPE_MODE (inner_type))
-                      && TYPE_PRECISION (inner_type) < prec)
-                    {
-                      prec = TYPE_PRECISION (inner_type);
-                      /* See if we can shorten the right shift.  */
-                      if (shiftc < prec)
-                        shift_type = inner_type;
-                      /* Otherwise X >> C1 is all zeros, so we'll optimize
-                         it into (X, 0) later on by making sure zerobits
-                         is all ones.  */
-                    }
-                }
-              zerobits = ~(unsigned HOST_WIDE_INT) 0;
-              if (shiftc < prec)
-                {
-                  zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc;
-                  zerobits <<= prec - shiftc;
-                }
-              /* For arithmetic shift if sign bit could be set, zerobits
-                 can contain actually sign bits, so no transformation is
-                 possible, unless MASK masks them all away.  In that
-                 case the shift needs to be converted into logical shift.  */
-              if (!TYPE_UNSIGNED (TREE_TYPE (arg0))
-                  && prec == TYPE_PRECISION (TREE_TYPE (arg0)))
-                {
-                  if ((mask & zerobits) == 0)
-                    shift_type = unsigned_type_for (TREE_TYPE (arg0));
-                  else
-                    zerobits = 0;
-                }
-            }
-
-          /* ((X << 16) & 0xff00) is (X, 0).  */
-          if ((mask & zerobits) == mask)
-            return omit_one_operand_loc (loc, type,
-                                         build_int_cst (type, 0), arg0);
-
-          newmask = mask | zerobits;
-          if (newmask != mask && (newmask & (newmask + 1)) == 0)
-            {
-              /* Only do the transformation if NEWMASK is some integer
-                 mode's mask.  */
-              for (prec = BITS_PER_UNIT;
-                   prec < HOST_BITS_PER_WIDE_INT; prec <<= 1)
-                if (newmask == (((unsigned HOST_WIDE_INT) 1) << prec) - 1)
-                  break;
-              if (prec < HOST_BITS_PER_WIDE_INT
-                  || newmask == ~(unsigned HOST_WIDE_INT) 0)
-                {
-                  tree newmaskt;
-
-                  if (shift_type != TREE_TYPE (arg0))
-                    {
-                      tem = fold_build2_loc (loc, TREE_CODE (arg0), shift_type,
-                                             fold_convert_loc (loc, shift_type,
-                                                               TREE_OPERAND (arg0, 0)),
-                                             TREE_OPERAND (arg0, 1));
-                      tem = fold_convert_loc (loc, type, tem);
-                    }
-                  else
-                    tem = op0;
-                  newmaskt = build_int_cst_type (TREE_TYPE (op1), newmask);
-                  if (!tree_int_cst_equal (newmaskt, arg1))
-                    return fold_build2_loc (loc, BIT_AND_EXPR, type, tem, newmaskt);
-                }
-            }
-        }
-
       goto associate;
     case RDIV_EXPR: