-rw-r--r--  gdb/dwarf2/index-write.c              69
-rw-r--r--  gdb/testsuite/gdb.gdb/index-file.exp  41
-rw-r--r--  gdb/testsuite/lib/gdb.exp             15
3 files changed, 125 insertions, 0 deletions
diff --git a/gdb/dwarf2/index-write.c b/gdb/dwarf2/index-write.c
index 7805c6a..e3ddb8d 100644
--- a/gdb/dwarf2/index-write.c
+++ b/gdb/dwarf2/index-write.c
@@ -212,6 +212,13 @@ struct mapped_symtab
void add_index_entry (const char *name, int is_static,
gdb_index_symbol_kind kind, offset_type cu_index);
+ /* When entries are originally added into the data hash, the order will
+ vary based on the number of worker threads GDB is configured to use.
+ This function rebuilds the hash so that the final layout is
+ deterministic regardless of the number of worker threads used. */
+
+ void sort ();
+
/* Access the obstack. */
struct obstack *obstack ()
{ return &m_string_obstack; }
@@ -298,6 +305,65 @@ mapped_symtab::hash_expand ()
}
}
+/* See mapped_symtab class declaration. */
+
+void
+mapped_symtab::sort ()
+{
+ /* Move contents out of this->data vector. */
+ std::vector<symtab_index_entry> original_data = std::move (m_data);
+
+ /* Restore the size of m_data; this avoids having to expand the hash
+ table (and rehash all elements) when we reinsert after sorting.
+ However, we do reset the element count, which allows for some
+ sanity-checking asserts during the reinsert phase. */
+ gdb_assert (m_data.size () == 0);
+ m_data.resize (original_data.size ());
+ m_element_count = 0;
+
+ /* Remove empty entries from ORIGINAL_DATA; this makes sorting quicker. */
+ auto it = std::remove_if (original_data.begin (), original_data.end (),
+ [] (const symtab_index_entry &entry) -> bool
+ {
+ return entry.name == nullptr;
+ });
+ original_data.erase (it, original_data.end ());
+
+ /* Sort the existing contents. */
+ std::sort (original_data.begin (), original_data.end (),
+ [] (const symtab_index_entry &a,
+ const symtab_index_entry &b) -> bool
+ {
+ /* Return true if A is before B. */
+ gdb_assert (a.name != nullptr);
+ gdb_assert (b.name != nullptr);
+
+ return strcmp (a.name, b.name) < 0;
+ });
+
+ /* Re-insert each item from the sorted list. */
+ for (auto &entry : original_data)
+ {
+ /* We know that ORIGINAL_DATA contains no duplicates; this data was
+ taken from a hash table that de-duplicated entries for us, so
+ count this as a new item.
+
+ As we retained the original size of m_data (see above), we
+ should never need to grow m_data during this re-insertion phase;
+ assert that now. */
+ ++m_element_count;
+ gdb_assert (!this->hash_needs_expanding ());
+
+ /* Lookup a slot. */
+ symtab_index_entry &slot = this->find_slot (entry.name);
+
+ /* As discussed above, we should not find duplicates. */
+ gdb_assert (slot.name == nullptr);
+
+ /* Move this item into the slot we found. */
+ slot = std::move (entry);
+ }
+}
+
/* See class definition. */
void
@@ -1346,6 +1412,9 @@ write_gdbindex (dwarf2_per_bfd *per_bfd, cooked_index *table,
for (auto map : table->get_addrmaps ())
write_address_map (map, addr_vec, cu_index_htab);
+ /* Ensure the symbol hash is built deterministically. */
+ symtab.sort ();
+
/* Now that we've processed all symbols we can shrink their cu_indices
lists. */
symtab.minimize ();
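
For illustration only, here is a minimal standalone sketch of the pattern mapped_symtab::sort relies on: drain the slot vector, drop empty slots, sort by name, then re-insert into a table of the original size so the final layout depends only on the set of names rather than on the order in which worker threads inserted them. The toy_symtab type, its hash function, and the linear probing scheme are simplified stand-ins, not GDB's actual mapped_symtab.

/* Toy sketch of the sort-then-reinsert idea; not GDB code.  */
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstring>
#include <functional>
#include <string>
#include <utility>
#include <vector>

struct toy_symtab
{
  struct entry { const char *name = nullptr; };

  explicit toy_symtab (size_t capacity) : m_data (capacity) {}

  /* Linear probing keeps the sketch short; GDB uses its own scheme.  */
  entry &find_slot (const char *name)
  {
    size_t idx = std::hash<std::string> () (name) % m_data.size ();
    while (m_data[idx].name != nullptr
           && std::strcmp (m_data[idx].name, name) != 0)
      idx = (idx + 1) % m_data.size ();
    return m_data[idx];
  }

  void add (const char *name)
  { find_slot (name).name = name; }

  /* The pattern from mapped_symtab::sort above: drain, drop empty
     slots, sort by name, re-insert into a table of the same size.  */
  void sort ()
  {
    std::vector<entry> original = std::move (m_data);
    m_data.assign (original.size (), entry ());

    original.erase (std::remove_if (original.begin (), original.end (),
                                    [] (const entry &e)
                                    { return e.name == nullptr; }),
                    original.end ());

    std::sort (original.begin (), original.end (),
               [] (const entry &a, const entry &b)
               { return std::strcmp (a.name, b.name) < 0; });

    for (entry &e : original)
      {
        entry &slot = find_slot (e.name);
        assert (slot.name == nullptr);  /* The source had no duplicates.  */
        slot = e;
      }
  }

  std::vector<entry> m_data;
};

int main ()
{
  /* Fill two tables with the same names in different orders; after
     sort () their slot layouts are identical.  */
  toy_symtab a (8), b (8);
  for (const char *n : { "main", "foo", "bar" })
    a.add (n);
  for (const char *n : { "bar", "main", "foo" })
    b.add (n);
  a.sort ();
  b.sort ();
  for (size_t i = 0; i < 8; ++i)
    {
      const char *an = a.m_data[i].name, *bn = b.m_data[i].name;
      assert ((an == nullptr) == (bn == nullptr));
      assert (an == nullptr || std::strcmp (an, bn) == 0);
    }
  return 0;
}
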
diff --git a/gdb/testsuite/gdb.gdb/index-file.exp b/gdb/testsuite/gdb.gdb/index-file.exp
index c6edd28..0841592 100644
--- a/gdb/testsuite/gdb.gdb/index-file.exp
+++ b/gdb/testsuite/gdb.gdb/index-file.exp
@@ -35,6 +35,9 @@ with_timeout_factor $timeout_factor {
clean_restart $filename
}
+# Record how many worker threads GDB is using.
+set worker_threads [gdb_get_worker_threads]
+
# Generate an index file.
set dir1 [standard_output_file "index_1"]
remote_exec host "mkdir -p ${dir1}"
@@ -113,3 +116,41 @@ proc check_symbol_table_usage { filename } {
set index_filename_base [file tail $filename]
check_symbol_table_usage "$dir1/${index_filename_base}.gdb-index"
+
+# If GDB is using more than 1 worker thread then reduce the number of
+# worker threads, regenerate the index, and check that we get the same
+# index file back. At one point the layout of the index would vary
+# based on the number of worker threads used.
+if { $worker_threads > 1 } {
+ # Start GDB, but don't load a file yet.
+ clean_restart
+
+ # Adjust the number of threads to use.
+ set reduced_threads [expr $worker_threads / 2]
+ gdb_test_no_output "maint set worker-threads $reduced_threads"
+
+ with_timeout_factor $timeout_factor {
+ # Now load the test binary.
+ gdb_file_cmd $filename
+ }
+
+ # Generate an index file.
+ set dir2 [standard_output_file "index_2"]
+ remote_exec host "mkdir -p ${dir2}"
+ with_timeout_factor $timeout_factor {
+ gdb_test_no_output "save gdb-index $dir2" \
+ "create second gdb-index file"
+ }
+
+ # Close GDB.
+ gdb_exit
+
+ # Now check that the index files are identical.
+ foreach suffix { gdb-index } {
+ set result \
+ [remote_exec host \
+ "cmp -s \"$dir1/${index_filename_base}.${suffix}\" \"$dir2/${index_filename_base}.${suffix}\""]
+ gdb_assert { [lindex $result 0] == 0 } \
+ "$suffix files are identical"
+ }
+}
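
The identity check above leans on "cmp -s", whose exit status is 0 only when the two generated .gdb-index files are byte-for-byte identical. As a rough standalone equivalent of that check, here is a C++ sketch; the file paths are placeholders, not ones the test actually uses.

/* Rough stand-in for the "cmp -s FILE1 FILE2" check; not part of the test.  */
#include <fstream>
#include <iostream>
#include <iterator>
#include <string>
#include <vector>

/* Return true if both files can be opened and have identical contents.  */
static bool
files_identical (const std::string &a, const std::string &b)
{
  std::ifstream fa (a, std::ios::binary);
  std::ifstream fb (b, std::ios::binary);
  if (!fa || !fb)
    return false;

  std::vector<char> da ((std::istreambuf_iterator<char> (fa)),
                        std::istreambuf_iterator<char> ());
  std::vector<char> db ((std::istreambuf_iterator<char> (fb)),
                        std::istreambuf_iterator<char> ());
  return da == db;
}

int main ()
{
  /* Placeholder paths; the real test compares the files written into
     the index_1 and index_2 output directories.  */
  bool same = files_identical ("index_1/prog.gdb-index",
                               "index_2/prog.gdb-index");
  std::cout << (same ? "identical" : "different") << std::endl;
  return same ? 0 : 1;
}
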
diff --git a/gdb/testsuite/lib/gdb.exp b/gdb/testsuite/lib/gdb.exp
index 6388586..b534a61 100644
--- a/gdb/testsuite/lib/gdb.exp
+++ b/gdb/testsuite/lib/gdb.exp
@@ -10026,6 +10026,21 @@ proc is_target_non_stop { {testname ""} } {
return $is_non_stop
}
+# Return the number of worker threads that GDB is currently using.
+
+proc gdb_get_worker_threads { {testname ""} } {
+ set worker_threads "UNKNOWN"
+ gdb_test_multiple "maintenance show worker-threads" $testname {
+ -wrap -re "^The number of worker threads GDB can use is unlimited \\(currently ($::decimal)\\)\\." {
+ set worker_threads $expect_out(1,string)
+ }
+ -wrap -re "The number of worker threads GDB can use is ($::decimal)\\." {
+ set worker_threads $expect_out(1,string)
+ }
+ }
+ return $worker_threads
+}
+
# Check if the compiler emits epilogue information associated
# with the closing brace or with the last statement line.
#
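
For reference, the two regexes in gdb_get_worker_threads above correspond to the two forms of the "maintenance show worker-threads" message: an unlimited setting with a current count, or a fixed count. The following C++ sketch mirrors that parsing using the message text taken from those patterns; the helper name and sample strings are illustrative only.

/* Sketch of the parsing done by gdb_get_worker_threads; not GDB code.  */
#include <iostream>
#include <regex>
#include <string>

/* Extract the worker thread count from either form of the
   "maintenance show worker-threads" message.  Returns -1 on no match.  */
static int
parse_worker_threads (const std::string &line)
{
  static const std::regex unlimited_re
    ("The number of worker threads GDB can use is unlimited "
     "\\(currently ([0-9]+)\\)\\.");
  static const std::regex fixed_re
    ("The number of worker threads GDB can use is ([0-9]+)\\.");

  std::smatch m;
  if (std::regex_search (line, m, unlimited_re)
      || std::regex_search (line, m, fixed_re))
    return std::stoi (m[1].str ());
  return -1;
}

int main ()
{
  std::cout << parse_worker_threads
    ("The number of worker threads GDB can use is unlimited (currently 8).")
            << "\n";    /* Prints 8.  */
  std::cout << parse_worker_threads
    ("The number of worker threads GDB can use is 4.")
            << "\n";    /* Prints 4.  */
  return 0;
}
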