| author | Tom Tromey <tromey@adacore.com> | 2022-12-13 12:03:34 -0700 |
|---|---|---|
| committer | Tom Tromey <tromey@adacore.com> | 2023-01-17 07:03:26 -0700 |
| commit | 63078a04984b73e1fdeb4571a63605ee5c13f929 (patch) | |
| tree | c5c7be98dd5323cc23b89bcc533fee341a94ff84 | |
| parent | 55e0daa3a3dcf6e1648fa96029a0a361cc110911 (diff) | |
Avoid submitting empty tasks in parallel_for_each
I found that parallel_for_each would submit empty tasks to the thread
pool. For example, this can happen if the number of tasks is smaller
than the number of available threads. In the DWARF reader, this
resulted in the cooked index containing empty sub-indices. This patch
instead arranges to shrink the result vector and to process the trailing
entries in the calling thread.
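
For a concrete feel of the behaviour being fixed, here is a minimal sketch. It is not GDB's parallel_for_each (the function name sketch_parallel_for_each is invented, and std::async stands in for GDB's thread pool), but it shows the strategy the message describes: stop splitting once a chunk would be empty or final, and run the tail in the calling thread.

```cpp
/* A minimal sketch, NOT GDB's parallel_for_each: std::async replaces
   GDB's thread pool.  It illustrates the idea in the commit message:
   never hand an empty chunk to a worker, and let the calling thread
   process the trailing elements itself.  */

#include <future>
#include <iostream>
#include <iterator>
#include <numeric>
#include <vector>

template<typename It, typename Fn>
void
sketch_parallel_for_each (int n_workers, It first, It last, Fn fn)
{
  std::vector<std::future<void>> futures;
  size_t remaining = std::distance (first, last);

  for (int i = 0; i < n_workers && first != last; ++i)
    {
      size_t chunk = remaining / (n_workers - i);
      It end = first + chunk;

      /* An empty chunk, or the final chunk, is not dispatched to a
         worker; the calling thread handles it below.  */
      if (chunk == 0 || end == last)
        break;

      futures.push_back (std::async (std::launch::async, fn, first, end));
      first = end;
      remaining -= chunk;
    }

  /* Process the remaining elements in the calling thread.  This is
     never an empty task unless the whole range was empty.  */
  if (first != last)
    fn (first, last);

  for (auto &f : futures)
    f.wait ();
}

int
main ()
{
  std::vector<int> data (3);
  std::iota (data.begin (), data.end (), 1);

  /* More workers than elements: no empty chunk is ever dispatched.  */
  sketch_parallel_for_each (8, data.begin (), data.end (),
                            [] (std::vector<int>::iterator b,
                                std::vector<int>::iterator e)
                            {
                              for (; b != e; ++b)
                                *b *= 10;
                            });

  for (int v : data)
    std::cout << v << '\n';   /* prints 10, 20, 30 */
}
```

With eight workers and three elements, the loop dispatches nothing and the caller processes the whole range, which is the situation the new selftest in the diff below exercises.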
-rw-r--r-- | gdb/unittests/parallel-for-selftests.c | 39
-rw-r--r-- | gdbsupport/parallel-for.h | 30
2 files changed, 69 insertions, 0 deletions
diff --git a/gdb/unittests/parallel-for-selftests.c b/gdb/unittests/parallel-for-selftests.c
index 3162db1..15a095a 100644
--- a/gdb/unittests/parallel-for-selftests.c
+++ b/gdb/unittests/parallel-for-selftests.c
@@ -149,6 +149,45 @@ TEST (int n_threads)
   SELF_CHECK (counter == NUMBER);
 #undef NUMBER
+
+  /* Check that if there are fewer tasks than threads, then we won't
+     end up with a null result.  */
+  std::vector<std::unique_ptr<int>> intresults;
+  std::atomic<bool> any_empty_tasks (false);
+
+  FOR_EACH (1, 0, 1,
+            [&] (int start, int end)
+            {
+              if (start == end)
+                any_empty_tasks = true;
+              return std::unique_ptr<int> (new int (end - start));
+            });
+  SELF_CHECK (!any_empty_tasks);
+  SELF_CHECK (std::all_of (intresults.begin (),
+                           intresults.end (),
+                           [] (const std::unique_ptr<int> &entry)
+                           {
+                             return entry != nullptr;
+                           }));
+
+  /* The same but using the task size parameter.  */
+  intresults.clear ();
+  any_empty_tasks = false;
+  FOR_EACH (1, 0, 1,
+            [&] (int start, int end)
+            {
+              if (start == end)
+                any_empty_tasks = true;
+              return std::unique_ptr<int> (new int (end - start));
+            },
+            task_size_one);
+  SELF_CHECK (!any_empty_tasks);
+  SELF_CHECK (std::all_of (intresults.begin (),
+                           intresults.end (),
+                           [] (const std::unique_ptr<int> &entry)
+                           {
+                             return entry != nullptr;
+                           }));
 }
 
 #endif /* FOR_EACH */
diff --git a/gdbsupport/parallel-for.h b/gdbsupport/parallel-for.h
index b565676..de9ebb1 100644
--- a/gdbsupport/parallel-for.h
+++ b/gdbsupport/parallel-for.h
@@ -70,6 +70,12 @@ public:
     return result;
   }
 
+  /* Resize the results to N.  */
+  void resize (size_t n)
+  {
+    m_futures.resize (n);
+  }
+
 private:
 
   /* A vector of futures coming from the tasks run in the
@@ -108,6 +114,12 @@ public:
       }
   }
 
+  /* Resize the results to N.  */
+  void resize (size_t n)
+  {
+    m_futures.resize (n);
+  }
+
 private:
 
   std::vector<gdb::future<void>> m_futures;
@@ -232,6 +244,24 @@ parallel_for_each (unsigned n, RandomIt first, RandomIt last,
           end = j;
           remaining_size -= chunk_size;
         }
+
+      /* This case means we don't have enough elements to really
+         distribute them.  Rather than ever submit a task that does
+         nothing, we short-circuit here.  */
+      if (first == end)
+        end = last;
+
+      if (end == last)
+        {
+          /* We're about to dispatch the last batch of elements, which
+             we normally process in the main thread.  So just truncate
+             the result list here.  This avoids submitting empty tasks
+             to the thread pool.  */
+          count = i;
+          results.resize (count);
+          break;
+        }
+
       if (parallel_for_each_debug)
         {
           debug_printf (_("Parallel for: elements on worker thread %i\t: %zu"),
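
The resize method added to both accumulator specializations is what makes the early break above safe: slots reserved for chunks that are never dispatched get dropped instead of surfacing as empty results. The following is a toy, self-contained model of that pattern; it is an assumption-laden sketch (invented class and member names, std::async instead of GDB's thread pool), not the real gdbsupport code.

```cpp
/* Toy model only -- toy_accumulator and its members are invented names,
   not GDB's API.  The futures vector is sized up front for the planned
   number of chunks (as the diff context suggests for the real
   accumulator), and resize () trims slots whose chunks were never
   dispatched, so the consumer never sees empty or invalid entries.  */

#include <functional>
#include <future>
#include <iostream>
#include <memory>
#include <vector>

template<typename T>
class toy_accumulator
{
public:
  explicit toy_accumulator (size_t planned)
    : m_futures (planned)
  {
  }

  /* Run TASK asynchronously and keep its future in slot I.  */
  void post (size_t i, std::function<T ()> task)
  {
    m_futures[i] = std::async (std::launch::async, std::move (task));
  }

  /* Resize the results to N, dropping slots that were never posted.  */
  void resize (size_t n)
  {
    m_futures.resize (n);
  }

  /* Wait for the posted tasks and collect their results.  */
  std::vector<T> finish ()
  {
    std::vector<T> out;
    for (auto &f : m_futures)
      out.push_back (f.get ());
    return out;
  }

private:
  std::vector<std::future<T>> m_futures;
};

int
main ()
{
  /* Plan for 4 chunks, but only the first has any elements.  */
  toy_accumulator<std::unique_ptr<int>> results (4);
  results.post (0, [] { return std::make_unique<int> (42); });

  /* Without this, finish () would call get () on futures that never
     received a task (or, in a scheme that posts do-nothing tasks,
     would collect null results).  */
  results.resize (1);

  for (const auto &r : results.finish ())
    std::cout << *r << '\n';    /* prints 42 */
}
```

Built with g++ -std=c++14, this prints 42; dropping the resize call leaves three futures with no shared state, the analogue of the empty sub-indices described in the commit message.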