Diffstat (limited to 'gcc')
-rw-r--r-- | gcc/ChangeLog                | 18
-rw-r--r-- | gcc/combine.c                | 16
-rw-r--r-- | gcc/cse.c                    |  4
-rw-r--r-- | gcc/double-int.h             |  2
-rw-r--r-- | gcc/dse.c                    |  2
-rw-r--r-- | gcc/dwarf2asm.c              |  2
-rw-r--r-- | gcc/expmed.c                 |  4
-rw-r--r-- | gcc/genmodes.c               |  2
-rw-r--r-- | gcc/match.pd                 |  4
-rw-r--r-- | gcc/read-rtl.c               |  2
-rw-r--r-- | gcc/tree-ssa-loop-ivopts.c   |  2
-rw-r--r-- | gcc/tree-ssa-loop-prefetch.c |  2
-rw-r--r-- | gcc/tree-vect-generic.c      |  2
-rw-r--r-- | gcc/tree-vect-patterns.c     |  2
-rw-r--r-- | gcc/tree.c                   |  4
15 files changed, 43 insertions, 25 deletions
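
The change below is mechanical: every open-coded all-ones constant is replaced with the equivalent HOST_WIDE_INT_M1 / HOST_WIDE_INT_M1U macro (the GCC definitions live in gcc/hwint.h). A minimal standalone sketch of the intended equivalence, assuming a 64-bit HOST_WIDE_INT and using local stand-in macro definitions for illustration only:

/* Standalone model of the idiom this patch replaces.  HOST_WIDE_INT is
   modelled as "long long" here; the local macro definitions are
   illustrative stand-ins, not GCC's hwint.h.  */
#include <assert.h>

#define HOST_WIDE_INT long long
#define HOST_WIDE_INT_M1  ((HOST_WIDE_INT) -1)
#define HOST_WIDE_INT_M1U ((unsigned HOST_WIDE_INT) -1)

int
main (void)
{
  /* The macros spell the same all-ones values as the removed casts.  */
  assert (HOST_WIDE_INT_M1 == ~(HOST_WIDE_INT) 0);
  assert (HOST_WIDE_INT_M1U == ~(unsigned HOST_WIDE_INT) 0);
  return 0;
}
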
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 36c1335..2763af1 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,21 @@
+2016-07-20  Uros Bizjak  <ubizjak@gmail.com>
+
+        * cse.c: Use HOST_WIDE_INT_M1 instead of ~(HOST_WIDE_INT) 0.
+        * combine.c: Use HOST_WIDE_INT_M1U instead of
+        ~(unsigned HOST_WIDE_INT) 0.
+        * double-int.h: Ditto.
+        * dse.c: Ditto.
+        * dwarf2asm.c: Ditto.
+        * expmed.c: Ditto.
+        * genmodes.c: Ditto.
+        * match.pd: Ditto.
+        * read-rtl.c: Ditto.
+        * tree-ssa-loop-ivopts.c: Ditto.
+        * tree-ssa-loop-prefetch.c: Ditto.
+        * tree-vect-generic.c: Ditto.
+        * tree-vect-patterns.c: Ditto.
+        * tree.c: Ditto.
+
 2016-07-20  Georg-Johann Lay  <avr@gjlay.de>
 
         * gcc/config/avr.c (avr_legitimize_address) [AVR_TINY]: Force
diff --git a/gcc/combine.c b/gcc/combine.c
index 1e5ee8e..1becc3c 100644
--- a/gcc/combine.c
+++ b/gcc/combine.c
@@ -1660,7 +1660,7 @@ update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set,
     }
 
   /* Don't call nonzero_bits if it cannot change anything.  */
-  if (rsp->nonzero_bits != ~(unsigned HOST_WIDE_INT) 0)
+  if (rsp->nonzero_bits != HOST_WIDE_INT_M1U)
     {
       bits = nonzero_bits (src, nonzero_bits_mode);
       if (reg_equal && bits)
@@ -6541,7 +6541,7 @@ simplify_set (rtx x)
   if (GET_MODE_CLASS (mode) == MODE_INT
       && HWI_COMPUTABLE_MODE_P (mode))
     {
-      src = force_to_mode (src, mode, ~(unsigned HOST_WIDE_INT) 0, 0);
+      src = force_to_mode (src, mode, HOST_WIDE_INT_M1U, 0);
       SUBST (SET_SRC (x), src);
     }
 
@@ -7446,7 +7446,7 @@ make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
       else
        new_rtx = force_to_mode (inner, tmode,
                                 len >= HOST_BITS_PER_WIDE_INT
-                                ? ~(unsigned HOST_WIDE_INT) 0
+                                ? HOST_WIDE_INT_M1U
                                 : (HOST_WIDE_INT_1U << len) - 1,
                                 0);
@@ -7635,7 +7635,7 @@ make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
       inner = force_to_mode (inner, wanted_inner_mode,
                              pos_rtx
                              || len + orig_pos >= HOST_BITS_PER_WIDE_INT
-                             ? ~(unsigned HOST_WIDE_INT) 0
+                             ? HOST_WIDE_INT_M1U
                              : (((HOST_WIDE_INT_1U << len) - 1)
                                 << orig_pos),
                              0);
@@ -8110,7 +8110,7 @@ make_compound_operation (rtx x, enum rtx_code in_code)
          && subreg_lowpart_p (x))
        {
          rtx newer
-           = force_to_mode (tem, mode, ~(unsigned HOST_WIDE_INT) 0, 0);
+           = force_to_mode (tem, mode, HOST_WIDE_INT_M1U, 0);
 
          /* If we have something other than a SUBREG, we might have
             done an expansion, so rerun ourselves.  */
@@ -8390,7 +8390,7 @@ force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
     do not know, we need to assume that all bits up to the highest-order
     bit in MASK will be needed.  This is how we form such a mask.  */
  if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
-    fuller_mask = ~(unsigned HOST_WIDE_INT) 0;
+    fuller_mask = HOST_WIDE_INT_M1U;
  else
    fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1))
                   - 1);
@@ -8733,7 +8733,7 @@ force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
          if (GET_MODE_PRECISION (GET_MODE (x))
              > HOST_BITS_PER_WIDE_INT)
            {
-             nonzero = ~(unsigned HOST_WIDE_INT) 0;
+             nonzero = HOST_WIDE_INT_M1U;
 
              /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
                 is the number of bits a full-width mask would have set.
@@ -9496,7 +9496,7 @@ make_field_assignment (rtx x)
                               dest);
   src = force_to_mode (src, mode,
                        GET_MODE_PRECISION (mode) >= HOST_BITS_PER_WIDE_INT
-                       ? ~(unsigned HOST_WIDE_INT) 0
+                       ? HOST_WIDE_INT_M1U
                        : (HOST_WIDE_INT_1U << len) - 1,
                        0);
diff --git a/gcc/cse.c b/gcc/cse.c
@@ -4565,7 +4565,7 @@ cse_insn (rtx_insn *insn)
              else
                shift = INTVAL (pos);
              if (INTVAL (width) == HOST_BITS_PER_WIDE_INT)
-               mask = ~(HOST_WIDE_INT) 0;
+               mask = HOST_WIDE_INT_M1;
              else
                mask = (HOST_WIDE_INT_1 << INTVAL (width)) - 1;
              val = (val >> shift) & mask;
@@ -5233,7 +5233,7 @@ cse_insn (rtx_insn *insn)
              else
                shift = INTVAL (pos);
              if (INTVAL (width) == HOST_BITS_PER_WIDE_INT)
-               mask = ~(HOST_WIDE_INT) 0;
+               mask = HOST_WIDE_INT_M1;
              else
                mask = (HOST_WIDE_INT_1 << INTVAL (width)) - 1;
              val &= ~(mask << shift);
diff --git a/gcc/double-int.h b/gcc/double-int.h
index fd84b4b..6f59c1b 100644
--- a/gcc/double-int.h
+++ b/gcc/double-int.h
@@ -365,7 +365,7 @@ double_int::operator ^ (double_int b) const
 
 void dump_double_int (FILE *, double_int, bool);
 
-#define ALL_ONES (~((unsigned HOST_WIDE_INT) 0))
+#define ALL_ONES HOST_WIDE_INT_M1U
 
 /* The operands of the following comparison functions must be processed
    with double_int_ext, if their precision is less than
diff --git a/gcc/dse.c b/gcc/dse.c
@@ -288,7 +288,7 @@ struct store_info
 static unsigned HOST_WIDE_INT
 lowpart_bitmask (int n)
 {
-  unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT) 0;
+  unsigned HOST_WIDE_INT mask = HOST_WIDE_INT_M1U;
   return mask >> (HOST_BITS_PER_WIDE_INT - n);
 }
diff --git a/gcc/dwarf2asm.c b/gcc/dwarf2asm.c
index bf5ded8..ae81445 100644
--- a/gcc/dwarf2asm.c
+++ b/gcc/dwarf2asm.c
@@ -97,7 +97,7 @@ dw2_asm_output_data (int size, unsigned HOST_WIDE_INT value,
   va_start (ap, comment);
 
   if (size * 8 < HOST_BITS_PER_WIDE_INT)
-    value &= ~(~(unsigned HOST_WIDE_INT) 0 << (size * 8));
+    value &= ~(HOST_WIDE_INT_M1U << (size * 8));
 
   if (op)
     {
diff --git a/gcc/expmed.c b/gcc/expmed.c
index 0b0abbc..f776e54 100644
--- a/gcc/expmed.c
+++ b/gcc/expmed.c
@@ -3513,7 +3513,7 @@ invert_mod2n (unsigned HOST_WIDE_INT x, int n)
   int nbit = 3;
 
   mask = (n == HOST_BITS_PER_WIDE_INT
-         ? ~(unsigned HOST_WIDE_INT) 0
+         ? HOST_WIDE_INT_M1U
          : (HOST_WIDE_INT_1U << n) - 1);
 
   while (nbit < n)
@@ -4423,7 +4423,7 @@ expand_divmod (int rem_flag, enum tree_code code, machine_mode mode,
                    || size - 1 >= BITS_PER_WORD)
                  goto fail1;
 
-               ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
+               ml |= HOST_WIDE_INT_M1U << (size - 1);
                mlr = gen_int_mode (ml, compute_mode);
                extra_cost = (shift_cost (speed, compute_mode, post_shift)
                              + shift_cost (speed, compute_mode, size - 1)
diff --git a/gcc/genmodes.c b/gcc/genmodes.c
index 59faae9..097cc80 100644
--- a/gcc/genmodes.c
+++ b/gcc/genmodes.c
@@ -1409,7 +1409,7 @@ emit_mode_mask (void)
   puts ("\
 #define MODE_MASK(m) \\\n\
   ((m) >= HOST_BITS_PER_WIDE_INT) \\\n\
-   ? ~(unsigned HOST_WIDE_INT) 0 \\\n\
+   ? HOST_WIDE_INT_M1U \\\n\
    : (HOST_WIDE_INT_1U << (m)) - 1\n");
 
   for_all_modes (c, m)
diff --git a/gcc/match.pd b/gcc/match.pd
index 836f7d8..21bf617 100644
--- a/gcc/match.pd
+++ b/gcc/match.pd
@@ -1487,7 +1487,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
                 is all ones.  */
            }
        }
-      zerobits = ~(unsigned HOST_WIDE_INT) 0;
+      zerobits = HOST_WIDE_INT_M1U;
       if (shiftc < prec)
        {
          zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc;
@@ -1522,7 +1522,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
            break;
        }
       (if (prec < HOST_BITS_PER_WIDE_INT
-          || newmask == ~(unsigned HOST_WIDE_INT) 0)
+          || newmask == HOST_WIDE_INT_M1U)
        (with
         { tree newmaskt = build_int_cst_type (TREE_TYPE (@2), newmask); }
         (if (!tree_int_cst_equal (newmaskt, @2))
diff --git a/gcc/read-rtl.c b/gcc/read-rtl.c
index dc3a336..a705859 100644
--- a/gcc/read-rtl.c
+++ b/gcc/read-rtl.c
@@ -711,7 +711,7 @@ atoll (const char *p)
       if (new_wide < tmp_wide)
        {
          /* Return INT_MAX equiv on overflow.  */
-         tmp_wide = (~(unsigned HOST_WIDE_INT) 0) >> 1;
+         tmp_wide = HOST_WIDE_INT_M1U >> 1;
          break;
        }
       tmp_wide = new_wide;
diff --git a/gcc/tree-ssa-loop-ivopts.c b/gcc/tree-ssa-loop-ivopts.c
index ed6bac9..62ba71b 100644
--- a/gcc/tree-ssa-loop-ivopts.c
+++ b/gcc/tree-ssa-loop-ivopts.c
@@ -4217,7 +4217,7 @@ get_address_cost (bool symbol_present, bool var_present,
     }
 
   bits = GET_MODE_BITSIZE (address_mode);
-  mask = ~(~(unsigned HOST_WIDE_INT) 0 << (bits - 1) << 1);
+  mask = ~(HOST_WIDE_INT_M1U << (bits - 1) << 1);
   offset &= mask;
   if ((offset >> (bits - 1) & 1))
     offset |= ~mask;
diff --git a/gcc/tree-ssa-loop-prefetch.c b/gcc/tree-ssa-loop-prefetch.c
index e43ced6..26cf0a0 100644
--- a/gcc/tree-ssa-loop-prefetch.c
+++ b/gcc/tree-ssa-loop-prefetch.c
@@ -233,7 +233,7 @@ struct mem_ref_group
 
 /* Assigned to PREFETCH_BEFORE when all iterations are to be prefetched.  */
 
-#define PREFETCH_ALL (~(unsigned HOST_WIDE_INT) 0)
+#define PREFETCH_ALL HOST_WIDE_INT_M1U
 
 /* Do not generate a prefetch if the unroll factor is significantly less
    than what is required by the prefetch.  This is to avoid redundant
diff --git a/gcc/tree-vect-generic.c b/gcc/tree-vect-generic.c
index 5c4798a..9f0ec65 100644
--- a/gcc/tree-vect-generic.c
+++ b/gcc/tree-vect-generic.c
@@ -575,7 +575,7 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0,
          if (ml >= HOST_WIDE_INT_1U << (prec - 1))
            {
              this_mode = 4 + (d < 0);
-             ml |= (~(unsigned HOST_WIDE_INT) 0) << (prec - 1);
+             ml |= HOST_WIDE_INT_M1U << (prec - 1);
            }
          else
            this_mode = 2 + (d < 0);
diff --git a/gcc/tree-vect-patterns.c b/gcc/tree-vect-patterns.c
index d78f92d..2457844 100644
--- a/gcc/tree-vect-patterns.c
+++ b/gcc/tree-vect-patterns.c
@@ -2861,7 +2861,7 @@ vect_recog_divmod_pattern (vec<gimple *> *stmts,
          if (ml >= HOST_WIDE_INT_1U << (prec - 1))
            {
              add = true;
-             ml |= (~(unsigned HOST_WIDE_INT) 0) << (prec - 1);
+             ml |= HOST_WIDE_INT_M1U << (prec - 1);
            }
          if (post_shift >= prec)
            return NULL;
diff --git a/gcc/tree.c b/gcc/tree.c
@@ -11338,9 +11338,9 @@ int_cst_value (const_tree x)
     {
       bool negative = ((val >> (bits - 1)) & 1) != 0;
 
       if (negative)
-       val |= (~(unsigned HOST_WIDE_INT) 0) << (bits - 1) << 1;
+       val |= HOST_WIDE_INT_M1U << (bits - 1) << 1;
       else
-       val &= ~((~(unsigned HOST_WIDE_INT) 0) << (bits - 1) << 1);
+       val &= ~(HOST_WIDE_INT_M1U << (bits - 1) << 1);
     }
 
   return val;
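
The int_cst_value hunk above shifts twice, "<< (bits - 1) << 1", rather than once by bits; that keeps the expression well defined when bits equals HOST_BITS_PER_WIDE_INT, where a single full-width shift would be undefined. A standalone sketch of that sign-extension idiom, assuming a 64-bit HOST_WIDE_INT; the sign_extend helper name and the local macro definitions are illustrative, not GCC source:

/* Model of the sign-extension idiom from int_cst_value (tree.c): extend a
   value held in the low BITS bits of an unsigned HOST_WIDE_INT to the full
   width.  Shifting by (bits - 1) and then by 1 avoids the undefined
   full-width shift when bits == HOST_BITS_PER_WIDE_INT.  */
#include <assert.h>

#define HOST_WIDE_INT long long
#define HOST_BITS_PER_WIDE_INT 64
#define HOST_WIDE_INT_M1U ((unsigned HOST_WIDE_INT) -1)

static HOST_WIDE_INT
sign_extend (unsigned HOST_WIDE_INT val, int bits)
{
  int negative = ((val >> (bits - 1)) & 1) != 0;

  if (negative)
    val |= HOST_WIDE_INT_M1U << (bits - 1) << 1;    /* set the high bits */
  else
    val &= ~(HOST_WIDE_INT_M1U << (bits - 1) << 1); /* clear the high bits */
  return (HOST_WIDE_INT) val;
}

int
main (void)
{
  assert (sign_extend (0xff, 8) == -1);    /* 8-bit -1 extends to -1 */
  assert (sign_extend (0x7f, 8) == 127);   /* positive value is unchanged */
  assert (sign_extend (HOST_WIDE_INT_M1U, HOST_BITS_PER_WIDE_INT) == -1);
  return 0;
}
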