author     Tom Tromey <tromey@adacore.com>     2022-12-13 12:03:34 -0700
committer  Tom Tromey <tromey@adacore.com>     2023-01-17 07:03:26 -0700
commit     63078a04984b73e1fdeb4571a63605ee5c13f929 (patch)
tree       c5c7be98dd5323cc23b89bcc533fee341a94ff84 /gdbsupport
parent     55e0daa3a3dcf6e1648fa96029a0a361cc110911 (diff)
Avoid submitting empty tasks in parallel_for_each
I found that parallel_for_each would submit empty tasks to the thread pool. For example, this can happen if the number of tasks is smaller than the number of available threads. In the DWARF reader, this resulted in the cooked index containing empty sub-indices. This patch arranges to instead shrink the result vector and process the trailing entries in the calling thread.
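To make the shape of the fix easier to see outside of GDB, here is a minimal, self-contained C++ sketch of the same chunking idea.  It is not GDB's implementation: it uses std::async and std::future from the standard library instead of gdb::thread_pool and the accumulator classes touched in the diff below, and the names chunk_demo, process_range and n_workers are invented for the example.  It illustrates the point the commit message makes: once the current chunk reaches the end of the input, the remaining elements are handled in the calling thread and no further (possibly empty) tasks are submitted.

#include <cstdio>
#include <future>
#include <numeric>
#include <vector>

/* Stand-in for the per-chunk callback.  */
static void
process_range (const int *first, const int *last)
{
  long sum = std::accumulate (first, last, 0L);
  std::printf ("chunk of %zu element(s), sum %ld\n",
               (size_t) (last - first), sum);
}

/* Split [FIRST, LAST) across up to N_WORKERS background tasks, giving
   the final chunk to the calling thread.  If there are fewer elements
   than workers, an early chunk would come out empty; in that case we
   stop submitting tasks immediately -- the short-circuit the patch
   adds.  */
static void
chunk_demo (const int *first, const int *last, size_t n_workers)
{
  std::vector<std::future<void>> futures;
  size_t remaining = last - first;

  for (size_t i = 0; i < n_workers; ++i)
    {
      /* Divide what is left evenly among the remaining workers.  */
      size_t chunk_size = remaining / (n_workers - i);
      const int *end = first + chunk_size;
      remaining -= chunk_size;

      /* Too few elements to distribute: hand the rest to this chunk
         rather than submitting an empty task.  */
      if (first == end)
        end = last;

      if (end == last)
        /* The last batch is processed in the calling thread below, so
           no more tasks are created -- the analogue of truncating the
           result vector with results.resize (count) in the patch.  */
        break;

      futures.push_back (std::async (std::launch::async,
                                     process_range, first, end));
      first = end;
    }

  /* Handle the trailing elements inline.  */
  process_range (first, last);

  for (auto &f : futures)
    f.wait ();
}

int
main ()
{
  std::vector<int> data (3);    /* Fewer elements than workers.  */
  std::iota (data.begin (), data.end (), 1);
  chunk_demo (data.data (), data.data () + data.size (), 8);
  return 0;
}

With 3 elements and 8 workers, the first computed chunk would be empty, so everything is processed inline and the futures vector stays empty; with more elements than workers the usual fan-out happens and only the trailing chunk runs in the caller.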
Diffstat (limited to 'gdbsupport')
-rw-r--r--  gdbsupport/parallel-for.h  |  30
1 file changed, 30 insertions(+), 0 deletions(-)
diff --git a/gdbsupport/parallel-for.h b/gdbsupport/parallel-for.h
index b565676..de9ebb1 100644
--- a/gdbsupport/parallel-for.h
+++ b/gdbsupport/parallel-for.h
@@ -70,6 +70,12 @@ public:
     return result;
   }
 
+  /* Resize the results to N.  */
+  void resize (size_t n)
+  {
+    m_futures.resize (n);
+  }
+
 private:
 
   /* A vector of futures coming from the tasks run in the
@@ -108,6 +114,12 @@ public:
       }
   }
 
+  /* Resize the results to N.  */
+  void resize (size_t n)
+  {
+    m_futures.resize (n);
+  }
+
 private:
 
   std::vector<gdb::future<void>> m_futures;
@@ -232,6 +244,24 @@ parallel_for_each (unsigned n, RandomIt first, RandomIt last,
           end = j;
           remaining_size -= chunk_size;
         }
+
+      /* This case means we don't have enough elements to really
+         distribute them.  Rather than ever submit a task that does
+         nothing, we short-circuit here.  */
+      if (first == end)
+        end = last;
+
+      if (end == last)
+        {
+          /* We're about to dispatch the last batch of elements, which
+             we normally process in the main thread.  So just truncate
+             the result list here.  This avoids submitting empty tasks
+             to the thread pool.  */
+          count = i;
+          results.resize (count);
+          break;
+        }
+
       if (parallel_for_each_debug)
         {
           debug_printf (_("Parallel for: elements on worker thread %i\t: %zu"),