author     Richard Biener <rguenther@suse.de>  2020-05-18 09:17:24 +0200
committer  Richard Biener <rguenther@suse.de>  2020-05-18 11:53:10 +0200
commit     52a0f83980082c9995f2d8ec9b88548520fb8a5f (patch)
tree       b692c135345da7a3eeeea1ac268995d4603d2339 /gcc/tree-ssa-loop-im.c
parent     03d549090e3551eb3c4a41a5d63a76cff7112c7b (diff)
tree-optimization/95172 - avoid mixing conditionalized and ordered SM
The following testcase shows a missed optimization that then leads to
wrong code when issuing SMed stores on exits.  When we are able to
compute an ordered sequence of stores for an exit we need to emit
that in the correct order, and we can emit it disregarding any
conditional for whether a store actually happened (we know it did).
We can also improve the detection of whether we need conditional
processing at all.  Both parts fix the testcase.
2020-05-18  Richard Biener  <rguenther@suse.de>

	PR tree-optimization/95172
	* tree-ssa-loop-im.c (execute_sm): Get flag whether we
	eventually need the conditional processing.
	(execute_sm_exit): When processing an ordered sequence
	avoid doing any conditional processing.
	(hoist_memory_references): Pass down whether all edges
	have ordered processing for a ref to execute_sm.
	* gcc.dg/torture/pr95172.c: New testcase.
Diffstat (limited to 'gcc/tree-ssa-loop-im.c')
-rw-r--r--  gcc/tree-ssa-loop-im.c | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/gcc/tree-ssa-loop-im.c b/gcc/tree-ssa-loop-im.c
index 0d77aaa..63f4ef8 100644
--- a/gcc/tree-ssa-loop-im.c
+++ b/gcc/tree-ssa-loop-im.c
@@ -2130,7 +2130,7 @@ struct sm_aux
 
 static void
 execute_sm (class loop *loop, im_mem_ref *ref,
-	    hash_map<im_mem_ref *, sm_aux *> &aux_map)
+	    hash_map<im_mem_ref *, sm_aux *> &aux_map, bool maybe_mt)
 {
   gassign *load;
   struct fmt_data fmt_data;
@@ -2154,8 +2154,9 @@ execute_sm (class loop *loop, im_mem_ref *ref,
   for_each_index (&ref->mem.ref, force_move_till, &fmt_data);
 
   bool always_stored = ref_always_accessed_p (loop, ref, true);
-  if (bb_in_transaction (loop_preheader_edge (loop)->src)
-      || (! flag_store_data_races && ! always_stored))
+  if (maybe_mt
+      && (bb_in_transaction (loop_preheader_edge (loop)->src)
+	  || (! flag_store_data_races && ! always_stored)))
     multi_threaded_model_p = true;
 
   if (multi_threaded_model_p)
@@ -2244,7 +2245,7 @@ execute_sm_exit (class loop *loop, edge ex, vec<seq_entry> &seq,
       else
 	{
 	  sm_aux *aux = *aux_map.get (ref);
-	  if (!aux->store_flag)
+	  if (!aux->store_flag || kind == sm_ord)
 	    {
 	      gassign *store;
 	      store = gimple_build_assign (unshare_expr (ref->mem.ref),
@@ -2630,7 +2631,7 @@ hoist_memory_references (class loop *loop, bitmap mem_refs,
   EXECUTE_IF_SET_IN_BITMAP (mem_refs, 0, i, bi)
     {
       ref = memory_accesses.refs_list[i];
-      execute_sm (loop, ref, aux_map);
+      execute_sm (loop, ref, aux_map, bitmap_bit_p (refs_not_supported, i));
    }
 
   /* Materialize ordered store sequences on exits.  */
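To make the execute_sm_exit change concrete, here is a hedged sketch in
plain C (illustrative names, not GCC's internal sequence representation)
of how exit materialization differs between the two kinds:

/* On a loop exit, an ordered (sm_ord) sequence is emitted
   unconditionally and in sequence order, because we know every store
   in it executed; an unordered (sm_other) ref stays guarded by its
   store flag.  */
void
materialize_exit (int *p, int *q, int r1, int r2,
		  _Bool ordered, _Bool f1, _Bool f2)
{
  if (ordered)
    {
      /* sm_ord: no flag checks, stores kept in sequence order.  */
      *p = r1;
      *q = r2;
    }
  else
    {
      /* sm_other: each store happens only if its flag was set
	 in the loop.  */
      if (f1)
	*p = r1;
      if (f2)
	*q = r2;
    }
}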