From 59730cc567ecdd3a550d9c005dbaa63620cc0f19 Mon Sep 17 00:00:00 2001
From: Uros Bizjak
Date: Tue, 20 Nov 2018 20:43:20 +0100
Subject: re PR target/88070 (ICE in create_pre_exit, at mode-switching.c:438)

	PR target/88070
	* mode-switching.c (create_pre_exit): After reload, always split the
	fallthrough edge to the exit block.

testsuite/ChangeLog:

	PR target/88070
	* gcc.target/i386/pr88070.c: New test.

From-SVN: r266326
---
 gcc/mode-switching.c | 18 ++++++++++++++++--
 1 file changed, 16 insertions(+), 2 deletions(-)

(limited to 'gcc/mode-switching.c')

diff --git a/gcc/mode-switching.c b/gcc/mode-switching.c
index 370a49e..589fbeb 100644
--- a/gcc/mode-switching.c
+++ b/gcc/mode-switching.c
@@ -248,8 +248,22 @@ create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
 	      gcc_assert (!pre_exit);
 	      /* If this function returns a value at the end, we have to
 		 insert the final mode switch before the return value copy
-		 to its hard register.  */
-	      if (EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) == 1
+		 to its hard register.
+
+		 x86 targets use mode-switching infrastructure to
+		 conditionally insert vzeroupper instruction at the exit
+		 from the function where there is no need to switch the
+		 mode before the return value copy.  The vzeroupper insertion
+		 pass runs after reload, so use !reload_completed as a stand-in
+		 for x86 to skip the search for the return value copy insn.
+
+		 N.b.: the code below assumes that the return copy insn
+		 immediately precedes its corresponding use insn.  This
+		 assumption does not hold after reload, since sched1 pass
+		 can schedule the return copy insn away from its
+		 corresponding use insn.  */
+	      if (!reload_completed
+		  && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) == 1
 		  && NONJUMP_INSN_P ((last_insn = BB_END (src_bb)))
 		  && GET_CODE (PATTERN (last_insn)) == USE
 		  && GET_CODE ((ret_reg = XEXP (PATTERN (last_insn), 0))) == REG)
--
cgit v1.1
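
Note: for context, the kind of input that reaches this path is a small
function whose return value copy (to the hard return register, followed
by a USE insn) sits at the end of the last basic block while the
post-reload vzeroupper pass runs the mode-switching machinery.  The
sketch below is hypothetical; it is not the contents of
gcc.target/i386/pr88070.c, and the function shape and dg-options are
assumptions.  It only illustrates the scenario the new comment
describes: with sched1 (-fschedule-insns) enabled, the return value
copy can end up scheduled away from its USE insn, so the pre-reload
pattern match above would misfire.

    /* Hypothetical illustration, not the actual pr88070.c testcase.
       Assumed directives:
       { dg-do compile }
       { dg-options "-O2 -mavx -fschedule-insns" }  */

    typedef float v2sf __attribute__ ((__vector_size__ (8)));

    v2sf
    foo (float c)
    {
      /* The vector return value is copied to its hard return register
         at function exit; sched1 may separate that copy from the
         trailing USE insn.  */
      v2sf v = { c, c };
      return v;
    }

With !reload_completed added to the condition, the post-reload
vzeroupper pass no longer tries to locate the return copy insn at all;
per the ChangeLog entry, it instead always splits the fallthrough edge
to the exit block, which gives a safe place to insert the mode switch.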