aboutsummaryrefslogtreecommitdiff
path: root/llvm/lib/Support/Allocator.cpp
diff options
context:
space:
mode:
authorChandler Carruth <chandlerc@gmail.com>2014-03-28 08:53:25 +0000
committerChandler Carruth <chandlerc@gmail.com>2014-03-28 08:53:25 +0000
commit3b56b9cf90a9ff38184f43acbbe5ca2412312dfc (patch)
tree6e896f3e6a565bba0579ceca3142d50d1073711d /llvm/lib/Support/Allocator.cpp
parentead0f76443bc561cb8dd975b6e85552c2dcd6ef8 (diff)
downloadllvm-3b56b9cf90a9ff38184f43acbbe5ca2412312dfc.zip
llvm-3b56b9cf90a9ff38184f43acbbe5ca2412312dfc.tar.gz
llvm-3b56b9cf90a9ff38184f43acbbe5ca2412312dfc.tar.bz2
[Allocator Cleanup] Make the growth of the "slab" size of the
BumpPtrAllocator significantly less strange by making it a simple function of the number of slabs allocated rather than by making it a recurrence. I *think* the previous behavior was essentially that the size of the slabs would be doubled after the first 128 were allocated, and then doubled again each time 64 more were allocated, but only if every allocation packed perfectly into the slab size. If not, the wasted space wouldn't be counted toward increasing the size, but allocations over the size threshold *would*. And since the allocations over the size threshold might be much larger than the slab size, this could have somewhat surprising consequences where we rapidly grow the slab size. This currently requires adding state to the allocator to track the number of slabs currently allocated, but that isn't too bad. I'm planning further changes to the allocator that will make this state fall out even more naturally. It still doesn't fully decouple the growth rate from the allocations which are over the size threshold. That fix is coming later. This specific fix will allow making the entire thing into a more stateless device and lifting the parameters into template parameters rather than runtime parameters. llvm-svn: 204993
Diffstat (limited to 'llvm/lib/Support/Allocator.cpp')
-rw-r--r--llvm/lib/Support/Allocator.cpp34
1 files changed, 17 insertions, 17 deletions
diff --git a/llvm/lib/Support/Allocator.cpp b/llvm/lib/Support/Allocator.cpp
index 6e7a541..7df3835 100644
--- a/llvm/lib/Support/Allocator.cpp
+++ b/llvm/lib/Support/Allocator.cpp
@@ -24,11 +24,12 @@ namespace llvm {
BumpPtrAllocator::BumpPtrAllocator(size_t size, size_t threshold,
SlabAllocator &allocator)
: SlabSize(size), SizeThreshold(std::min(size, threshold)),
- Allocator(allocator), CurSlab(0), BytesAllocated(0) { }
+ Allocator(allocator), CurSlab(0), BytesAllocated(0), NumSlabs(0) {}
BumpPtrAllocator::BumpPtrAllocator(size_t size, size_t threshold)
: SlabSize(size), SizeThreshold(std::min(size, threshold)),
- Allocator(DefaultSlabAllocator), CurSlab(0), BytesAllocated(0) { }
+ Allocator(DefaultSlabAllocator), CurSlab(0), BytesAllocated(0),
+ NumSlabs(0) {}
BumpPtrAllocator::~BumpPtrAllocator() {
DeallocateSlabs(CurSlab);
@@ -49,13 +50,18 @@ char *BumpPtrAllocator::AlignPtr(char *Ptr, size_t Alignment) {
/// StartNewSlab - Allocate a new slab and move the bump pointers over into
/// the new slab. Modifies CurPtr and End.
void BumpPtrAllocator::StartNewSlab() {
- // If we allocated a big number of slabs already it's likely that we're going
- // to allocate more. Increase slab size to reduce mallocs and possibly memory
- // overhead. The factors are chosen conservatively to avoid overallocation.
- if (BytesAllocated >= SlabSize * 128)
- SlabSize *= 2;
-
- MemSlab *NewSlab = Allocator.Allocate(SlabSize);
+ ++NumSlabs;
+ // Scale the actual allocated slab size based on the number of slabs
+ // allocated. Every 128 slabs allocated, we double the allocated size to
+ // reduce allocation frequency, but saturate at multiplying the slab size by
+ // 2^30.
+ // FIXME: Currently, this count includes special slabs for objects above the
+ // size threshold. That will be fixed in a subsequent commit to make the
+ // growth even more predictable.
+ size_t AllocatedSlabSize =
+ SlabSize * (1 << std::min<size_t>(30, NumSlabs / 128));
+
+ MemSlab *NewSlab = Allocator.Allocate(AllocatedSlabSize);
NewSlab->NextPtr = CurSlab;
CurSlab = NewSlab;
CurPtr = (char*)(CurSlab + 1);
@@ -75,6 +81,7 @@ void BumpPtrAllocator::DeallocateSlabs(MemSlab *Slab) {
#endif
Allocator.Deallocate(Slab);
Slab = NextSlab;
+ --NumSlabs;
}
}
@@ -118,6 +125,7 @@ void *BumpPtrAllocator::Allocate(size_t Size, size_t Alignment) {
// If Size is really big, allocate a separate slab for it.
size_t PaddedSize = Size + sizeof(MemSlab) + Alignment - 1;
if (PaddedSize > SizeThreshold) {
+ ++NumSlabs;
MemSlab *NewSlab = Allocator.Allocate(PaddedSize);
// Put the new slab after the current slab, since we are not allocating
@@ -140,14 +148,6 @@ void *BumpPtrAllocator::Allocate(size_t Size, size_t Alignment) {
return Ptr;
}
-unsigned BumpPtrAllocator::GetNumSlabs() const {
- unsigned NumSlabs = 0;
- for (MemSlab *Slab = CurSlab; Slab != 0; Slab = Slab->NextPtr) {
- ++NumSlabs;
- }
- return NumSlabs;
-}
-
size_t BumpPtrAllocator::getTotalMemory() const {
size_t TotalMemory = 0;
for (MemSlab *Slab = CurSlab; Slab != 0; Slab = Slab->NextPtr) {