author     Steinar H. Gunderson <steinar+sourceware@gunderson.no>  2023-05-19 09:14:54 +0000
committer  Alan Modra <amodra@gmail.com>  2024-02-15 13:19:45 +1030
commit     7bd1e04a3532ed3f833a79a40bd7bc0bd48706ad
tree       98107fb7d9224470cc2dd726ed66c5aad1fec1eb /bfd/dwarf2.c
parent     2fbbadc2c336cad228be998a118e3bab3be30757
PR29785, memory bloat after b43771b045fb
Pathological cases of dwarf info with overlapping duplicate memory ranges
can cause splitting of trie leaf nodes, which in the worst case will cause
memory to increase without bounds.

	PR 29785
	* dwarf2.c (insert_arange_in_trie): Don't split leaf nodes
	unless that reduces number of elements in at least one node.
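The idea behind the fix can be illustrated with a small standalone sketch
(not the bfd sources: the struct names, the capacity constant and the helper
splitting_leaf_will_help below are made up for illustration; high_pc is taken
as exclusive while the bucket's high bound is inclusive, matching the patch).
A range that spans the whole bucket would be copied into every child when a
full leaf is split, so splitting only pays off when at least one stored range
is strictly narrower than the bucket:

    #include <stdbool.h>
    #include <stdint.h>

    struct range { uint64_t low_pc, high_pc; };   /* high_pc exclusive */

    struct leaf
    {
      unsigned num_stored_in_leaf;
      struct range ranges[16];                    /* capacity is arbitrary here */
    };

    /* Return true if splitting a full leaf covering the (inclusive) bucket
       [bucket_low_pc, bucket_high_pc] would shrink at least one child,
       i.e. some stored range does not cover the entire bucket.  */
    static bool
    splitting_leaf_will_help (const struct leaf *leaf,
                              uint64_t bucket_low_pc, uint64_t bucket_high_pc)
    {
      for (unsigned i = 0; i < leaf->num_stored_in_leaf; ++i)
        if (leaf->ranges[i].low_pc > bucket_low_pc
            || leaf->ranges[i].high_pc <= bucket_high_pc)
          return true;    /* this range is narrower than the bucket */
      return false;       /* every range spans the bucket; just grow the leaf */
    }

When this returns false, each child of a split would again hold all of the
ranges, so the trie would keep splitting without ever getting smaller; growing
the leaf in place, as the patch does below, avoids that blow-up.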
Diffstat (limited to 'bfd/dwarf2.c')
-rw-r--r--  bfd/dwarf2.c | 30
1 file changed, 25 insertions(+), 5 deletions(-)
diff --git a/bfd/dwarf2.c b/bfd/dwarf2.c
index 00d7c6a..8491257 100644
--- a/bfd/dwarf2.c
+++ b/bfd/dwarf2.c
@@ -2148,9 +2148,12 @@ insert_arange_in_trie (bfd *abfd,
bfd_vma low_pc,
bfd_vma high_pc)
{
+ bfd_vma bucket_high_pc =
+ trie_pc + ((bfd_vma) -1 >> trie_pc_bits); /* Inclusive. */
bfd_vma clamped_low_pc, clamped_high_pc;
int ch, from_ch, to_ch;
bool is_full_leaf = false;
+ bool splitting_leaf_will_help = false;
/* See if we can extend any of the existing ranges. This merging
isn't perfect (if merging opens up the possibility of merging two existing
@@ -2176,11 +2179,29 @@ insert_arange_in_trie (bfd *abfd,
}
is_full_leaf = leaf->num_stored_in_leaf == trie->num_room_in_leaf;
+
+ if (is_full_leaf)
+ {
+ /* See if we have at least one leaf that does _not_ cover the
+ entire bucket, so that splitting will actually reduce the number
+ of elements in at least one of the child nodes. (For simplicity,
+ we don't test the range we're inserting, but it will be counted
+ on the next insertion where we're full, if any.) */
+ for (i = 0; i < leaf->num_stored_in_leaf; ++i)
+ {
+ if (leaf->ranges[i].low_pc > trie_pc
+ || leaf->ranges[i].high_pc <= bucket_high_pc)
+ {
+ splitting_leaf_will_help = true;
+ break;
+ }
+ }
+ }
}
/* If we're a leaf with no more room and we're _not_ at the bottom,
convert to an interior node. */
- if (is_full_leaf && trie_pc_bits < VMA_BITS)
+ if (is_full_leaf && splitting_leaf_will_help && trie_pc_bits < VMA_BITS)
{
const struct trie_leaf *leaf = (struct trie_leaf *) trie;
unsigned int i;
@@ -2202,8 +2223,9 @@ insert_arange_in_trie (bfd *abfd,
}
}
- /* If we're a leaf with no more room and we _are_ at the bottom,
- we have no choice but to just make it larger. */
+ /* If we're a leaf with no more room and we _are_ at the bottom
+ (or splitting it won't help), we have no choice but to just
+ make it larger. */
if (is_full_leaf)
{
const struct trie_leaf *leaf = (struct trie_leaf *) trie;
@@ -2243,8 +2265,6 @@ insert_arange_in_trie (bfd *abfd,
clamped_high_pc = high_pc;
if (trie_pc_bits > 0)
{
- bfd_vma bucket_high_pc =
- trie_pc + ((bfd_vma) -1 >> trie_pc_bits); /* Inclusive. */
if (clamped_low_pc < trie_pc)
clamped_low_pc = trie_pc;
if (clamped_high_pc > bucket_high_pc)