author     Jonathan Wakely <jwakely@redhat.com>    2018-11-13 22:57:53 +0000
committer  Jonathan Wakely <redi@gcc.gnu.org>      2018-11-13 22:57:53 +0000
commit     f2e005857e52ded5338e1179c0bb0fe3375cea71 (patch)
tree       7215d345c266d8938cae0627f3d5592b824785d4 /libstdc++-v3
parent     d3306a84a6cc954ff9d28d8a915a891fe15270f5 (diff)
Improve handling of pool_options::largest_required_pool_block
Make the munge_options function round the largest_required_pool_block value
to a multiple of the smallest pool size (currently 8 bytes) to avoid pools
with odd sizes. Ensure there is a pool large enough for blocks of the
requested size. Previously, when largest_required_pool_block was exactly
equal to one of the pool_sizes[] values there would be no pool of that size.
This patch increases _M_npools by one, so there is a pool at least as large
as the requested value. It also reduces the size of the largest pool to be
no larger than needed.

	* src/c++17/memory_resource.cc (munge_options): Round up value of
	largest_required_pool_block to multiple of smallest pool size. Round
	excessively large values down to largest pool size.
	(select_num_pools): Increase number of pools by one unless it exactly
	matches requested largest_required_pool_block.
	(__pool_resource::_M_alloc_pools()): Make largest pool size equal
	largest_required_pool_block.
	* testsuite/20_util/unsynchronized_pool_resource/options.cc: Check
	that pool_options::largest_required_pool_block is set appropriately.

From-SVN: r266089
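The rounding described above is a plain power-of-two round-up followed by a
clamp. Below is a minimal standalone sketch of that arithmetic; the helper
name and driver are illustrative only, the 8-byte granularity and the 1<<22
upper bound come from the pool_sizes[] table in the patch, and the patch
additionally enforces a lower bound of big_block::min that this sketch omits.

#include <cstddef>
#include <cstdio>

// Granularity and upper bound taken from pool_sizes[] in this patch:
// the smallest pool is 8 bytes, the largest is 1<<22 (4MB).
constexpr std::size_t smallest_pool = 8;
constexpr std::size_t largest_pool  = std::size_t(1) << 22;

// Illustrative helper mirroring the new rounding in munge_options.
std::size_t munge_block_size(std::size_t requested)
{
  static_assert((smallest_pool & (smallest_pool - 1)) == 0,
                "round-up by masking requires a power-of-two granularity");
  const std::size_t mask = smallest_pool - 1;
  std::size_t block = (requested + mask) & ~mask;  // round up to a multiple of 8
  if (block > largest_pool)
    block = largest_pool;                          // clamp oversized requests
  return block;
}

int main()
{
  std::printf("%zu\n", munge_block_size(71));                   // 72
  std::printf("%zu\n", munge_block_size(64));                   // 64 (already a multiple)
  std::printf("%zu\n", munge_block_size(std::size_t(1) << 23)); // 4194304 (clamped to 4MB)
}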
Diffstat (limited to 'libstdc++-v3')
-rw-r--r--  libstdc++-v3/ChangeLog                                                   10
-rw-r--r--  libstdc++-v3/src/c++17/memory_resource.cc                                50
-rw-r--r--  libstdc++-v3/testsuite/20_util/unsynchronized_pool_resource/options.cc   62
3 files changed, 101 insertions, 21 deletions
diff --git a/libstdc++-v3/ChangeLog b/libstdc++-v3/ChangeLog
index 9b6a72d..97fbea7 100644
--- a/libstdc++-v3/ChangeLog
+++ b/libstdc++-v3/ChangeLog
@@ -1,5 +1,15 @@
2018-11-13 Jonathan Wakely <jwakely@redhat.com>
+ * src/c++17/memory_resource.cc (munge_options): Round up value of
+ largest_required_pool_block to multiple of smallest pool size. Round
+ excessively large values down to largest pool size.
+ (select_num_pools): Increase number of pools by one unless it exactly
+ matches requested largest_required_pool_block.
+ (__pool_resource::_M_alloc_pools()): Make largest pool size equal
+ largest_required_pool_block.
+ * testsuite/20_util/unsynchronized_pool_resource/options.cc: Check
+ that pool_options::largest_required_pool_block is set appropriately.
+
* src/c++17/memory_resource.cc (big_block): Improve comments.
(big_block::all_ones): Remove.
(big_block::big_block(size_t, size_t)): Use alloc_size.
diff --git a/libstdc++-v3/src/c++17/memory_resource.cc b/libstdc++-v3/src/c++17/memory_resource.cc
index 719cb9f..691a299 100644
--- a/libstdc++-v3/src/c++17/memory_resource.cc
+++ b/libstdc++-v3/src/c++17/memory_resource.cc
@@ -830,6 +830,19 @@ namespace pmr
namespace {
+ constexpr size_t pool_sizes[] = {
+ 8, 16, 24,
+ 32, 48,
+ 64, 80, 96, 112,
+ 128, 192,
+ 256, 320, 384, 448,
+ 512, 768,
+ 1024, 1536,
+ 2048, 3072,
+ 1<<12, 1<<13, 1<<14, 1<<15, 1<<16, 1<<17,
+ 1<<20, 1<<21, 1<<22 // 4MB should be enough for anybody
+ };
+
pool_options
munge_options(pool_options opts)
{
@@ -860,29 +873,25 @@ namespace pmr
}
else
{
- // TODO round to preferred granularity ?
+ // Round to preferred granularity
+ static_assert(std::__ispow2(pool_sizes[0]));
+ constexpr size_t mask = pool_sizes[0] - 1;
+ opts.largest_required_pool_block += mask;
+ opts.largest_required_pool_block &= ~mask;
}
if (opts.largest_required_pool_block < big_block::min)
{
opts.largest_required_pool_block = big_block::min;
}
+ else if (opts.largest_required_pool_block > std::end(pool_sizes)[-1])
+ {
+ // Setting _M_opts to the largest pool allows users to query it:
+ opts.largest_required_pool_block = std::end(pool_sizes)[-1];
+ }
return opts;
}
- const size_t pool_sizes[] = {
- 8, 16, 24,
- 32, 48,
- 64, 80, 96, 112,
- 128, 192,
- 256, 320, 384, 448,
- 512, 768,
- 1024, 1536,
- 2048, 3072,
- 1<<12, 1<<13, 1<<14, 1<<15, 1<<16, 1<<17,
- 1<<20, 1<<21, 1<<22 // 4MB should be enough for anybody
- };
-
inline int
pool_index(size_t block_size, int npools)
{
@@ -898,9 +907,10 @@ namespace pmr
{
auto p = std::lower_bound(std::begin(pool_sizes), std::end(pool_sizes),
opts.largest_required_pool_block);
- if (int npools = p - std::begin(pool_sizes))
- return npools;
- return 1;
+ const int n = p - std::begin(pool_sizes);
+ if (p == std::end(pool_sizes) || *p == opts.largest_required_pool_block)
+ return n;
+ return n + 1;
}
} // namespace
@@ -971,7 +981,11 @@ namespace pmr
_Pool* p = alloc.allocate(_M_npools);
for (int i = 0; i < _M_npools; ++i)
{
- const size_t block_size = pool_sizes[i];
+ // For last pool use largest_required_pool_block
+ const size_t block_size = (i+1 == _M_npools)
+ ? _M_opts.largest_required_pool_block
+ : pool_sizes[i];
+
// Decide on initial number of blocks per chunk.
// Always have at least 16 blocks per chunk:
const size_t min_blocks_per_chunk = 16;
diff --git a/libstdc++-v3/testsuite/20_util/unsynchronized_pool_resource/options.cc b/libstdc++-v3/testsuite/20_util/unsynchronized_pool_resource/options.cc
index bfa8a8c..a3e4c44 100644
--- a/libstdc++-v3/testsuite/20_util/unsynchronized_pool_resource/options.cc
+++ b/libstdc++-v3/testsuite/20_util/unsynchronized_pool_resource/options.cc
@@ -20,6 +20,13 @@
#include <memory_resource>
#include <testsuite_hooks.h>
+#include <testsuite_allocator.h>
+
+bool eq(const std::pmr::pool_options& lhs, const std::pmr::pool_options& rhs)
+{
+ return lhs.max_blocks_per_chunk == rhs.max_blocks_per_chunk
+ && lhs.largest_required_pool_block == rhs.largest_required_pool_block;
+}
void
test01()
@@ -30,13 +37,62 @@ test01()
VERIFY( opts.largest_required_pool_block != 0 );
std::pmr::unsynchronized_pool_resource r1(opts);
- auto [max_blocks_per_chunk, largest_required_pool_block ] = r1.options();
- VERIFY( max_blocks_per_chunk == opts.max_blocks_per_chunk );
- VERIFY( largest_required_pool_block == opts.largest_required_pool_block );
+ const auto opts1 = r1.options();
+ VERIFY( eq(opts, opts1) );
+
+ std::pmr::unsynchronized_pool_resource r2(std::pmr::pool_options{0, 0});
+ const auto opts2 = r2.options();
+ VERIFY( eq(opts, opts2) );
+}
+
+void
+test02()
+{
+ std::pmr::pool_options opts{0, 0};
+ std::size_t num_allocs = 0;
+
+ __gnu_test::memory_resource test_mr;
+
+ std::pmr::unsynchronized_pool_resource r1(opts, &test_mr);
+ opts = r1.options();
+ // opts.largest_required_pool_block should be set to the block size of
+ // the largest pool (this is a GNU extension). Confirm this by checking
+ // that allocations larger than opts.largest_required_pool_block come
+ // directly from the upstream allocator, test_mr, not from r1's pools.
+
+ // The following should result in a "large" allocation direct from upstream:
+ (void) r1.allocate(opts.largest_required_pool_block + 1);
+ num_allocs = test_mr.number_of_active_allocations();
+ // This should result in another "large" allocation direct from upstream:
+ (void) r1.allocate(opts.largest_required_pool_block + 1);
+ // Which means the number of upstream allocations should have increased:
+ VERIFY( test_mr.number_of_active_allocations() > num_allocs );
+ r1.release();
+
+ // Repeat with a user-specified block size:
+ opts.largest_required_pool_block = 64;
+ std::pmr::unsynchronized_pool_resource r2(opts, &test_mr);
+ opts = r2.options();
+ (void) r2.allocate(opts.largest_required_pool_block + 1);
+ num_allocs = test_mr.number_of_active_allocations();
+ (void) r2.allocate(opts.largest_required_pool_block + 1);
+ VERIFY( test_mr.number_of_active_allocations() > num_allocs );
+ r2.release();
+
+ // Repeat with an odd user-specified block size:
+ opts.largest_required_pool_block = 71;
+ std::pmr::unsynchronized_pool_resource r3(opts, &test_mr);
+ opts = r3.options();
+ (void) r3.allocate(opts.largest_required_pool_block + 1);
+ num_allocs = test_mr.number_of_active_allocations();
+ (void) r3.allocate(opts.largest_required_pool_block + 1);
+ VERIFY( test_mr.number_of_active_allocations() > num_allocs );
+ r3.release();
}
int
main()
{
test01();
+ test02();
}
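As the comments in test02 note, reporting the munged value through options()
is a GNU extension. A small user-level program along the following lines shows
the effect; the printed value reflects this implementation's rounding after
the patch and is not a guarantee made by the standard.

#include <memory_resource>
#include <cstdio>

int main()
{
  // Request an odd largest_required_pool_block; the resource rounds it up
  // to the 8-byte pool granularity, and options() reports the munged value.
  std::pmr::pool_options opts{0, 71};
  std::pmr::unsynchronized_pool_resource r(opts);
  std::printf("%zu\n", r.options().largest_required_pool_block); // 72 with this patch

  // Anything larger than the reported value bypasses the pools and goes
  // straight to the upstream resource (new_delete_resource by default).
  void* p = r.allocate(r.options().largest_required_pool_block + 1);
  r.deallocate(p, r.options().largest_required_pool_block + 1);
}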