-rw-r--r--  gcc/.cgraphunit.c.swp   bin   0 -> 126976 bytes
-rw-r--r--  gcc/.lto-cgraph.c.swo   bin   0 -> 16384 bytes
-rw-r--r--  gcc/cgraphunit.c         12
-rw-r--r--  gcc/gcc.c                 9
-rw-r--r--  gcc/ipa-split.c          17
5 files changed, 26 insertions, 12 deletions
diff --git a/gcc/.cgraphunit.c.swp b/gcc/.cgraphunit.c.swp
new file mode 100644
index 0000000..83c78e6
--- /dev/null
+++ b/gcc/.cgraphunit.c.swp
Binary files differ
diff --git a/gcc/.lto-cgraph.c.swo b/gcc/.lto-cgraph.c.swo
new file mode 100644
index 0000000..a87c6db
--- /dev/null
+++ b/gcc/.lto-cgraph.c.swo
Binary files differ
diff --git a/gcc/cgraphunit.c b/gcc/cgraphunit.c
index 73e4bed..12d2d19 100644
--- a/gcc/cgraphunit.c
+++ b/gcc/cgraphunit.c
@@ -2794,10 +2794,22 @@ maybe_compile_in_parallel (void)
   bool jobserver = false;
   bool job_auto = false;
   int num_jobs = -1;
+  unsigned long long insns = 0;
+  cgraph_node *cnode;
 
   if (!flag_parallel_jobs || !split_outputs)
     return false;
 
+  FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (cnode)
+    {
+      ipa_size_summary *ss = ipa_size_summaries->get (cnode);
+      if (!cnode->inlined_to && ss)
+        insns += ss->size;
+    }
+
+  if (insns < (unsigned long long) param_min_partition_size)
+    return false;
+
   if (!strcmp (flag_parallel_jobs, "auto"))
     {
       jobserver = jobserver_initialize ();
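
The hunk above makes maybe_compile_in_parallel () bail out early on small translation units: it sums the IPA size estimate of every function that still owns its body and only continues if the total reaches param_min_partition_size, since forking extra compiler jobs for a tiny unit costs more than it saves. Below is a minimal standalone sketch of that gate in plain C; fn_summary, worth_compiling_in_parallel and the literal threshold are illustrative stand-ins for GCC's cgraph_node, ipa_size_summary and param_min_partition_size, not the real internals.

/* Standalone sketch of the size gate above (not GCC code): sum an estimated
   size for every function that still owns its body and only compile in
   parallel when the total reaches a minimum partition size.  */
#include <stdio.h>
#include <stdbool.h>

struct fn_summary
{
  const char *name;
  bool inlined_elsewhere;     /* plays the role of cnode->inlined_to != NULL */
  unsigned long long size;    /* plays the role of ipa_size_summary::size */
};

static bool
worth_compiling_in_parallel (const struct fn_summary *fns, int n,
                             unsigned long long min_partition_size)
{
  unsigned long long insns = 0;
  for (int i = 0; i < n; i++)
    if (!fns[i].inlined_elsewhere)
      insns += fns[i].size;
  /* Too little work: forking extra compiler processes would cost more than
     it saves.  */
  return insns >= min_partition_size;
}

int
main (void)
{
  struct fn_summary fns[] = {
    { "foo", false, 120 },
    { "bar", true, 400 },    /* inlined into another function; not counted */
    { "baz", false, 80 },
  };
  /* 120 + 80 = 200 < 1000, so this unit is too small to split.  */
  printf ("compile in parallel? %d\n",
          worth_compiling_in_parallel (fns, 3, 1000));
  return 0;
}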
diff --git a/gcc/gcc.c b/gcc/gcc.c
index c276a11..ba942c0 100644
--- a/gcc/gcc.c
+++ b/gcc/gcc.c
@@ -3405,14 +3405,7 @@ append_split_outputs (extra_arg_storer *storer,
       argv[argc++] = extra_argument;
     }
 
-  if (have_c)
-    {
-      argv[argc++] = "-fPIE";
-      argv[argc++] = "-fPIC";
-    }
-
   argv[argc] = NULL;
-
   commands[0].argv = argv;
 }
@@ -3977,7 +3970,7 @@ execute (void)
   /* FIXME: Interact with GNU Jobserver if necessary.  */
   commands_batch = commands;
-  n = flag_parallel_jobs? 1: n_commands;
+  n = n_commands;
 
   for (int i = 0; i < n_commands; i += n)
     {
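
The second gcc.c hunk drops the special case that forced a batch size of 1 whenever -fparallel-jobs was given; with n = n_commands the outer loop in execute () now walks the whole command list in a single batch. A minimal sketch of that chunked-loop shape, with plain strings standing in for GCC's struct command and nothing actually spawned:

/* Sketch of walking a command list in batches of n, the shape of the loop
   in execute () above.  Plain strings stand in for GCC's struct command and
   nothing is actually forked.  */
#include <stdio.h>

int
main (void)
{
  const char *commands[] = { "cc1 a.c", "cc1 b.c", "as a.s", "as b.s" };
  int n_commands = sizeof (commands) / sizeof (commands[0]);
  int n = n_commands;   /* one batch; the removed code used 1 under -fparallel-jobs */

  for (int i = 0; i < n_commands; i += n)
    {
      int end = (i + n < n_commands) ? i + n : n_commands;
      printf ("batch starting at command %d:\n", i);
      for (int j = i; j < end; j++)
        printf ("  would spawn: %s\n", commands[j]);
      /* A real driver would fork/exec every command in the batch here and
         wait for all of them before starting the next batch.  */
    }
  return 0;
}

With n equal to the number of commands the outer loop runs exactly once; shrinking n would serialize the work into smaller batches again.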
diff --git a/gcc/ipa-split.c b/gcc/ipa-split.c
index 973e72c..09e9eb9 100644
--- a/gcc/ipa-split.c
+++ b/gcc/ipa-split.c
@@ -1909,8 +1909,12 @@ pass_split_functions::gate (function *)
 {
   /* When doing profile feedback, we want to execute the pass after profiling
      is read.  So disable one in early optimization.  */
-  return (flag_partial_inlining
-          && !profile_arc_flag && !flag_branch_probabilities);
+
+  /* Disabled due to an issue regarding how the partitioner applier
+     handles clones generated by this pass.  */
+  /* return (flag_partial_inlining
+             && !profile_arc_flag && !flag_branch_probabilities);  */
+  return false;
 }
 
 } // anon namespace
@@ -1968,8 +1972,13 @@ pass_feedback_split_functions::gate (function *)
 {
   /* We don't need to split when profiling at all, we are producing
      lousy code anyway.  */
-  return (flag_partial_inlining
-          && flag_branch_probabilities);
+
+  /* Disabled due to an issue regarding how the partitioner applier
+     handles clones generated by this pass.  */
+  /* return (flag_partial_inlining
+             && flag_branch_probabilities);  */
+
+  return false;
 }
 
 } // anon namespace
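
Both ipa-split.c hunks use the same pattern: the original gate condition is kept in a comment and the gate returns false unconditionally, so the split-functions passes stay compiled in but never run until the partitioner can cope with the clones they create. A standalone sketch of that pattern (the function name and the flags mentioned in the comment are illustrative, not GCC's real gate):

/* Standalone sketch of the disable pattern above: the gate keeps its
   original condition in a comment and returns false unconditionally, so the
   pass stays built but never runs.  Illustrative only, not GCC's gate.  */
#include <stdio.h>
#include <stdbool.h>

static bool
feedback_split_gate (void)
{
  /* Temporarily disabled; the original condition was
       return flag_partial_inlining && flag_branch_probabilities;
     and can be restored once the partitioner handles the clones this pass
     creates.  */
  return false;
}

int
main (void)
{
  printf ("run pass_feedback_split_functions? %d\n", feedback_split_gate ());
  return 0;
}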