path: root/gcc/dse.cc
author	Richard Sandiford <richard.sandiford@arm.com>	2024-08-29 14:00:23 +0100
committer	Richard Sandiford <richard.sandiford@arm.com>	2024-08-29 14:00:23 +0100
commit	00ec6bd805924b2d7d72cf03b200b3b4b7831835 (patch)
tree	8fe66507e6a41b5d453acfb0856f62e746daf45b /gcc/dse.cc
parent	07e5e054a1c579dec3e1ed2192992b2fea14ad40 (diff)
Make some smallest_int_mode_for_size calls cope with failure
smallest_int_mode_for_size now returns an optional mode rather than
aborting on failure.  This patch adjusts a couple of callers so that
they fail gracefully when no mode exists.

There should be no behavioural change, since anything that triggers
the new return paths would previously have aborted.  I just think this
is how the code would have been written if the option had been
available earlier.

gcc/
	* dse.cc (find_shift_sequence): Allow smallest_int_mode_for_size
	to fail.
	* optabs.cc (expand_twoval_binop_libfunc): Likewise.
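[Editorial sketch, not part of the commit: smallest_int_mode_for_size
returns GCC's optional-mode wrapper opt_scalar_int_mode, whose
.require () aborts when no integer mode is wide enough, while
.exists (&mode) turns that failure into a testable condition.  The
helper names old_way/new_way below are invented for illustration.]

    /* Old pattern: .require () aborts if no integer mode of at least
       ACCESS_BYTES * BITS_PER_UNIT bits exists.  */
    static scalar_int_mode
    old_way (poly_int64 access_bytes)
    {
      return smallest_int_mode_for_size (access_bytes * BITS_PER_UNIT)
	       .require ();
    }

    /* New pattern: .exists () lets the caller decide what "no such
       mode" means and fail gracefully instead of aborting.  */
    static bool
    new_way (poly_int64 access_bytes, scalar_int_mode *mode_out)
    {
      return smallest_int_mode_for_size (access_bytes * BITS_PER_UNIT)
	       .exists (mode_out);
    }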
Diffstat (limited to 'gcc/dse.cc')
-rw-r--r--	gcc/dse.cc	16
1 file changed, 8 insertions, 8 deletions
diff --git a/gcc/dse.cc b/gcc/dse.cc
index c3feff0..75825a4 100644
--- a/gcc/dse.cc
+++ b/gcc/dse.cc
@@ -1717,12 +1717,12 @@ dump_insn_info (const char * start, insn_info_t insn_info)
line up, we need to extract the value from lower part of the rhs of
the store, shift it, and then put it into a form that can be shoved
into the read_insn. This function generates a right SHIFT of a
- value that is at least ACCESS_SIZE bytes wide of READ_MODE. The
+ value that is at least ACCESS_BYTES bytes wide of READ_MODE. The
shift sequence is returned or NULL if we failed to find a
shift. */
static rtx
-find_shift_sequence (poly_int64 access_size,
+find_shift_sequence (poly_int64 access_bytes,
store_info *store_info,
machine_mode read_mode,
poly_int64 shift, bool speed, bool require_cst)
@@ -1734,11 +1734,11 @@ find_shift_sequence (poly_int64 access_size,
/* If a constant was stored into memory, try to simplify it here,
otherwise the cost of the shift might preclude this optimization
e.g. at -Os, even when no actual shift will be needed. */
+ auto access_bits = access_bytes * BITS_PER_UNIT;
if (store_info->const_rhs
- && known_le (access_size, GET_MODE_SIZE (MAX_MODE_INT)))
+ && known_le (access_bytes, GET_MODE_SIZE (MAX_MODE_INT))
+ && smallest_int_mode_for_size (access_bits).exists (&new_mode))
{
- auto new_mode = smallest_int_mode_for_size
- (access_size * BITS_PER_UNIT).require ();
auto byte = subreg_lowpart_offset (new_mode, store_mode);
rtx ret
= simplify_subreg (new_mode, store_info->const_rhs, store_mode, byte);
@@ -1810,7 +1810,7 @@ find_shift_sequence (poly_int64 access_size,
}
}
- if (maybe_lt (GET_MODE_SIZE (new_mode), access_size))
+ if (maybe_lt (GET_MODE_SIZE (new_mode), access_bytes))
continue;
new_reg = gen_reg_rtx (new_mode);
@@ -1839,8 +1839,8 @@ find_shift_sequence (poly_int64 access_size,
of the arguments and could be precomputed. It may
not be worth doing so. We could precompute if
worthwhile or at least cache the results. The result
- technically depends on both SHIFT and ACCESS_SIZE,
- but in practice the answer will depend only on ACCESS_SIZE. */
+ technically depends on both SHIFT and ACCESS_BYTES,
+ but in practice the answer will depend only on ACCESS_BYTES. */
if (cost > COSTS_N_INSNS (1))
continue;
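[Editorial note: the ChangeLog also names expand_twoval_binop_libfunc
in optabs.cc, which this diffstat-limited view does not show.  A
hypothetical sketch of what the same conversion typically looks like
there; the exact lines are assumptions, not taken from this diff:]

    /* Hypothetical shape of the optabs.cc hunk: the same
       .require () -> .exists () conversion, where returning false
       matches the function's existing "no expansion found"
       convention.  */
    -  mode = smallest_int_mode_for_size (GET_MODE_BITSIZE (mode) * 2)
    -	    .require ();
    +  if (!smallest_int_mode_for_size (GET_MODE_BITSIZE (mode) * 2)
    +	     .exists (&mode))
    +    return false;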