Diffstat (limited to 'libsanitizer/sanitizer_common/sanitizer_stackdepotbase.h')
-rw-r--r-- | libsanitizer/sanitizer_common/sanitizer_stackdepotbase.h | 170
1 file changed, 84 insertions, 86 deletions
diff --git a/libsanitizer/sanitizer_common/sanitizer_stackdepotbase.h b/libsanitizer/sanitizer_common/sanitizer_stackdepotbase.h
index 435f634..96d1ddc 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stackdepotbase.h
+++ b/libsanitizer/sanitizer_common/sanitizer_stackdepotbase.h
@@ -16,72 +16,87 @@
 #include <stdio.h>
 
 #include "sanitizer_atomic.h"
+#include "sanitizer_flat_map.h"
 #include "sanitizer_internal_defs.h"
 #include "sanitizer_mutex.h"
-#include "sanitizer_persistent_allocator.h"
 
 namespace __sanitizer {
 
 template <class Node, int kReservedBits, int kTabSizeLog>
 class StackDepotBase {
+  static constexpr u32 kIdSizeLog =
+      sizeof(u32) * 8 - Max(kReservedBits, 1 /* At least 1 bit for locking. */);
+  static constexpr u32 kNodesSize1Log = kIdSizeLog / 2;
+  static constexpr u32 kNodesSize2Log = kIdSizeLog - kNodesSize1Log;
+  static constexpr int kTabSize = 1 << kTabSizeLog;  // Hash table size.
+  static constexpr u32 kUnlockMask = (1ull << kIdSizeLog) - 1;
+  static constexpr u32 kLockMask = ~kUnlockMask;
+
  public:
   typedef typename Node::args_type args_type;
   typedef typename Node::handle_type handle_type;
   typedef typename Node::hash_type hash_type;
+
+  static constexpr u64 kNodesSize1 = 1ull << kNodesSize1Log;
+  static constexpr u64 kNodesSize2 = 1ull << kNodesSize2Log;
+
   // Maps stack trace to an unique id.
-  handle_type Put(args_type args, bool *inserted = nullptr);
+  u32 Put(args_type args, bool *inserted = nullptr);
   // Retrieves a stored stack trace by the id.
   args_type Get(u32 id);
 
-  StackDepotStats GetStats() const { return stats; }
+  StackDepotStats GetStats() const {
+    return {
+        atomic_load_relaxed(&n_uniq_ids),
+        nodes.MemoryUsage() + Node::allocated(),
+    };
+  }
 
   void LockAll();
   void UnlockAll();
   void PrintAll();
 
- private:
-  static Node *find(Node *s, args_type args, hash_type hash);
-  static Node *lock(atomic_uintptr_t *p);
-  static void unlock(atomic_uintptr_t *p, Node *s);
+  void TestOnlyUnmap() {
+    nodes.TestOnlyUnmap();
+    internal_memset(this, 0, sizeof(*this));
+  }
 
-  static const int kTabSize = 1 << kTabSizeLog;  // Hash table size.
-  static const int kPartBits = 8;
-  static const int kPartShift = sizeof(u32) * 8 - kPartBits - kReservedBits;
-  static const int kPartCount =
-      1 << kPartBits;  // Number of subparts in the table.
-  static const int kPartSize = kTabSize / kPartCount;
-  static const int kMaxId = 1 << kPartShift;
+ private:
+  friend Node;
+  u32 find(u32 s, args_type args, hash_type hash) const;
+  static u32 lock(atomic_uint32_t *p);
+  static void unlock(atomic_uint32_t *p, u32 s);
+  atomic_uint32_t tab[kTabSize];  // Hash table of Node's.
 
-  atomic_uintptr_t tab[kTabSize];   // Hash table of Node's.
-  atomic_uint32_t seq[kPartCount];  // Unique id generators.
+  atomic_uint32_t n_uniq_ids;
 
-  StackDepotStats stats;
+  TwoLevelMap<Node, kNodesSize1, kNodesSize2> nodes;
 
   friend class StackDepotReverseMap;
 };
 
 template <class Node, int kReservedBits, int kTabSizeLog>
-Node *StackDepotBase<Node, kReservedBits, kTabSizeLog>::find(Node *s,
-                                                             args_type args,
-                                                             hash_type hash) {
+u32 StackDepotBase<Node, kReservedBits, kTabSizeLog>::find(
+    u32 s, args_type args, hash_type hash) const {
   // Searches linked list s for the stack, returns its id.
-  for (; s; s = s->link) {
-    if (s->eq(hash, args)) {
+  for (; s;) {
+    const Node &node = nodes[s];
+    if (node.eq(hash, args))
       return s;
-    }
+    s = node.link;
   }
-  return nullptr;
+  return 0;
 }
 
 template <class Node, int kReservedBits, int kTabSizeLog>
-Node *StackDepotBase<Node, kReservedBits, kTabSizeLog>::lock(
-    atomic_uintptr_t *p) {
+u32 StackDepotBase<Node, kReservedBits, kTabSizeLog>::lock(atomic_uint32_t *p) {
   // Uses the pointer lsb as mutex.
   for (int i = 0;; i++) {
-    uptr cmp = atomic_load(p, memory_order_relaxed);
-    if ((cmp & 1) == 0 &&
-        atomic_compare_exchange_weak(p, &cmp, cmp | 1, memory_order_acquire))
-      return (Node *)cmp;
+    u32 cmp = atomic_load(p, memory_order_relaxed);
+    if ((cmp & kLockMask) == 0 &&
+        atomic_compare_exchange_weak(p, &cmp, cmp | kLockMask,
+                                     memory_order_acquire))
+      return cmp;
     if (i < 10)
       proc_yield(10);
     else
@@ -91,73 +106,57 @@ Node *StackDepotBase<Node, kReservedBits, kTabSizeLog>::lock(
 
 template <class Node, int kReservedBits, int kTabSizeLog>
 void StackDepotBase<Node, kReservedBits, kTabSizeLog>::unlock(
-    atomic_uintptr_t *p, Node *s) {
-  DCHECK_EQ((uptr)s & 1, 0);
-  atomic_store(p, (uptr)s, memory_order_release);
+    atomic_uint32_t *p, u32 s) {
+  DCHECK_EQ(s & kLockMask, 0);
+  atomic_store(p, s, memory_order_release);
 }
 
 template <class Node, int kReservedBits, int kTabSizeLog>
-typename StackDepotBase<Node, kReservedBits, kTabSizeLog>::handle_type
-StackDepotBase<Node, kReservedBits, kTabSizeLog>::Put(args_type args,
-                                                      bool *inserted) {
-  if (inserted) *inserted = false;
-  if (!Node::is_valid(args)) return handle_type();
+u32 StackDepotBase<Node, kReservedBits, kTabSizeLog>::Put(args_type args,
+                                                          bool *inserted) {
+  if (inserted)
+    *inserted = false;
+  if (!LIKELY(Node::is_valid(args)))
+    return 0;
   hash_type h = Node::hash(args);
-  atomic_uintptr_t *p = &tab[h % kTabSize];
-  uptr v = atomic_load(p, memory_order_consume);
-  Node *s = (Node *)(v & ~1);
+  atomic_uint32_t *p = &tab[h % kTabSize];
+  u32 v = atomic_load(p, memory_order_consume);
+  u32 s = v & kUnlockMask;
   // First, try to find the existing stack.
-  Node *node = find(s, args, h);
-  if (node) return node->get_handle();
+  u32 node = find(s, args, h);
+  if (LIKELY(node))
+    return node;
+
   // If failed, lock, retry and insert new.
-  Node *s2 = lock(p);
+  u32 s2 = lock(p);
   if (s2 != s) {
     node = find(s2, args, h);
    if (node) {
       unlock(p, s2);
-      return node->get_handle();
+      return node;
     }
   }
-  uptr part = (h % kTabSize) / kPartSize;
-  u32 id = atomic_fetch_add(&seq[part], 1, memory_order_relaxed) + 1;
-  stats.n_uniq_ids++;
-  CHECK_LT(id, kMaxId);
-  id |= part << kPartShift;
-  CHECK_NE(id, 0);
-  CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);
-  uptr memsz = Node::storage_size(args);
-  s = (Node *)PersistentAlloc(memsz);
-  stats.allocated += memsz;
-  s->id = id;
-  s->store(args, h);
-  s->link = s2;
+  s = atomic_fetch_add(&n_uniq_ids, 1, memory_order_relaxed) + 1;
+  CHECK_EQ(s & kUnlockMask, s);
+  CHECK_EQ(s & (((u32)-1) >> kReservedBits), s);
+  Node &new_node = nodes[s];
+  new_node.store(s, args, h);
+  new_node.link = s2;
   unlock(p, s);
   if (inserted) *inserted = true;
-  return s->get_handle();
+  return s;
 }
 
 template <class Node, int kReservedBits, int kTabSizeLog>
 typename StackDepotBase<Node, kReservedBits, kTabSizeLog>::args_type
 StackDepotBase<Node, kReservedBits, kTabSizeLog>::Get(u32 id) {
-  if (id == 0) {
+  if (id == 0)
     return args_type();
-  }
   CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);
-  // High kPartBits contain part id, so we need to scan at most kPartSize lists.
-  uptr part = id >> kPartShift;
-  for (int i = 0; i != kPartSize; i++) {
-    uptr idx = part * kPartSize + i;
-    CHECK_LT(idx, kTabSize);
-    atomic_uintptr_t *p = &tab[idx];
-    uptr v = atomic_load(p, memory_order_consume);
-    Node *s = (Node *)(v & ~1);
-    for (; s; s = s->link) {
-      if (s->id == id) {
-        return s->load();
-      }
-    }
-  }
-  return args_type();
+  if (!nodes.contains(id))
+    return args_type();
+  const Node &node = nodes[id];
+  return node.load(id);
 }
 
 template <class Node, int kReservedBits, int kTabSizeLog>
@@ -170,24 +169,23 @@ void StackDepotBase<Node, kReservedBits, kTabSizeLog>::LockAll() {
 template <class Node, int kReservedBits, int kTabSizeLog>
 void StackDepotBase<Node, kReservedBits, kTabSizeLog>::UnlockAll() {
   for (int i = 0; i < kTabSize; ++i) {
-    atomic_uintptr_t *p = &tab[i];
+    atomic_uint32_t *p = &tab[i];
     uptr s = atomic_load(p, memory_order_relaxed);
-    unlock(p, (Node *)(s & ~1UL));
+    unlock(p, s & kUnlockMask);
   }
 }
 
 template <class Node, int kReservedBits, int kTabSizeLog>
 void StackDepotBase<Node, kReservedBits, kTabSizeLog>::PrintAll() {
   for (int i = 0; i < kTabSize; ++i) {
-    atomic_uintptr_t *p = &tab[i];
-    lock(p);
-    uptr v = atomic_load(p, memory_order_relaxed);
-    Node *s = (Node *)(v & ~1UL);
-    for (; s; s = s->link) {
-      Printf("Stack for id %u:\n", s->id);
-      s->load().Print();
+    atomic_uint32_t *p = &tab[i];
+    u32 s = atomic_load(p, memory_order_consume) & kUnlockMask;
+    for (; s;) {
+      const Node &node = nodes[s];
+      Printf("Stack for id %u:\n", s);
+      node.load(s).Print();
+      s = node.link;
     }
-    unlock(p, s);
   }
 }
 
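
Note on the new constants: the patch sizes everything off kIdSizeLog, the number of id bits left in a 32-bit bucket word after reserving at least one bit for locking, and splits that id space across a two-level node map. The worked example below restates that arithmetic for kReservedBits == 0; it is an illustrative standalone sketch with the concrete values computed by hand from the formulas in the diff, not code copied from sanitizer_common.

// Illustrative restatement of the size arithmetic, for kReservedBits == 0.
#include <cstdint>

constexpr uint32_t kReservedBits = 0;
constexpr uint32_t kIdSizeLog =
    sizeof(uint32_t) * 8 -
    (kReservedBits < 1 ? 1u : kReservedBits);                    // 31 id bits
constexpr uint32_t kNodesSize1Log = kIdSizeLog / 2;               // 15
constexpr uint32_t kNodesSize2Log = kIdSizeLog - kNodesSize1Log;  // 16
constexpr uint64_t kNodesSize1 = 1ull << kNodesSize1Log;          // 32768
constexpr uint64_t kNodesSize2 = 1ull << kNodesSize2Log;          // 65536
constexpr uint32_t kUnlockMask = (1ull << kIdSizeLog) - 1;        // 0x7fffffff
constexpr uint32_t kLockMask = ~kUnlockMask;                      // 0x80000000

static_assert(kNodesSize1 * kNodesSize2 == (1ull << kIdSizeLog),
              "the two-level node map covers every representable id");
static_assert((kUnlockMask ^ kLockMask) == 0xffffffffu,
              "id bits and lock bits partition the 32-bit bucket word");

In other words, ids are now dense sequence numbers that index the TwoLevelMap directly, which is why the old per-part seq[] generators, kMaxId check, and the PersistentAlloc call can all be dropped.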
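The locking protocol changes shape as well: each hash bucket used to hold a Node* whose least-significant bit doubled as a spinlock, whereas it now holds a u32 whose low kIdSizeLog bits are the head id of the chain and whose high bits (kLockMask) are the lock. The sketch below is a minimal standalone model of that pattern using std::atomic; the names LockBucket/UnlockBucket and the plain yield loop are my own simplifications, not the proc_yield/internal_sched_yield backoff of the real lock().

// Minimal sketch of the bucket-locking scheme, assuming kReservedBits == 0.
#include <atomic>
#include <cassert>
#include <cstdint>
#include <thread>

namespace sketch {

constexpr uint32_t kReservedBits = 0;
constexpr uint32_t kIdSizeLog =
    sizeof(uint32_t) * 8 - (kReservedBits < 1 ? 1u : kReservedBits);
constexpr uint32_t kUnlockMask = (1ull << kIdSizeLog) - 1;  // id payload bits
constexpr uint32_t kLockMask = ~kUnlockMask;                // spinlock bit(s)

// Spin until the lock bits are clear, then set them with an acquire CAS.
// Returns the unlocked value, i.e. the id of the first node in the chain.
inline uint32_t LockBucket(std::atomic<uint32_t> *p) {
  for (;;) {
    uint32_t cmp = p->load(std::memory_order_relaxed);
    if ((cmp & kLockMask) == 0 &&
        p->compare_exchange_weak(cmp, cmp | kLockMask,
                                 std::memory_order_acquire))
      return cmp;
    std::this_thread::yield();  // simplified backoff
  }
}

// Publish a (possibly new) chain head and clear the lock bits in a single
// release store, mirroring unlock(p, s) in the patch.
inline void UnlockBucket(std::atomic<uint32_t> *p, uint32_t head_id) {
  assert((head_id & kLockMask) == 0);
  p->store(head_id, std::memory_order_release);
}

}  // namespace sketch

As in the patch, the value returned by LockBucket is the current head id, so a caller can re-run the lookup under the lock and link a freshly numbered node in front of the chain before publishing it with UnlockBucket.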