Revert "[sanitizer_common] Recycle StackDepot memory"

This reverts commit 78804e6b20.
commit bf4e1cf80a
parent 1fb612d060
Author: Jianzhou Zhao
Date:   2021-05-05 00:57:34 +00:00

4 changed files with 3 additions and 96 deletions

--- a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp

@@ -115,10 +115,6 @@ void StackDepotUnlockAll() {
   theDepot.UnlockAll();
 }
 
-void StackDepotFree() {
-  theDepot.Free();
-}
-
 void StackDepotPrintAll() {
 #if !SANITIZER_GO
   theDepot.PrintAll();

--- a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.h

@@ -39,7 +39,6 @@ StackDepotHandle StackDepotPut_WithHandle(StackTrace stack);
 // Retrieves a stored stack trace by the id.
 StackTrace StackDepotGet(u32 id);
 
-void StackDepotFree();
 void StackDepotLockAll();
 void StackDepotUnlockAll();
 void StackDepotPrintAll();

--- a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepotbase.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepotbase.h

@@ -37,15 +37,12 @@ class StackDepotBase {
   void LockAll();
   void UnlockAll();
   void PrintAll();
-  void Free();
 
  private:
   static Node *find(Node *s, args_type args, u32 hash);
   static Node *lock(atomic_uintptr_t *p);
   static void unlock(atomic_uintptr_t *p, Node *s);
-
-  Node *alloc(uptr part, uptr memsz);
 
   static const int kTabSize = 1 << kTabSizeLog;  // Hash table size.
   static const int kPartBits = 8;
   static const int kPartShift = sizeof(u32) * 8 - kPartBits - kReservedBits;
@@ -56,7 +53,6 @@ class StackDepotBase {
 
   atomic_uintptr_t tab[kTabSize];   // Hash table of Node's.
   atomic_uint32_t seq[kPartCount];  // Unique id generators.
-  atomic_uintptr_t freeNodes[kPartCount];
 
   StackDepotStats stats;
 
@@ -99,57 +95,6 @@ void StackDepotBase<Node, kReservedBits, kTabSizeLog>::unlock(
   atomic_store(p, (uptr)s, memory_order_release);
 }
 
-template <class Node, int kReservedBits, int kTabSizeLog>
-void StackDepotBase<Node, kReservedBits, kTabSizeLog>::Free() {
-  LockAll();
-  for (int i = 0; i < kPartCount; ++i) {
-    lock(&freeNodes[i]);
-  }
-
-  for (int i = 0; i < kTabSize; ++i) {
-    atomic_uintptr_t *p_tab = &tab[i];
-    Node *s = (Node *)(atomic_load(p_tab, memory_order_relaxed) & ~1UL);
-    while (s) {
-      uptr part = s->id >> kPartShift;
-      atomic_uintptr_t *p_free_nodes = &freeNodes[part];
-      Node *free_nodes_head =
-          (Node *)(atomic_load(p_free_nodes, memory_order_relaxed) & ~1UL);
-      Node *next = s->link;
-      s->link = free_nodes_head;
-      atomic_store(p_free_nodes, (uptr)s, memory_order_release);
-      s = next;
-    }
-    atomic_store(p_tab, (uptr)nullptr, memory_order_release);
-  }
-
-  stats.n_uniq_ids = 0;
-  for (int i = 0; i < kPartCount; ++i)
-    (void)atomic_exchange(&seq[i], 0, memory_order_relaxed);
-
-  for (int i = kPartCount - 1; i >= 0; --i) {
-    atomic_uintptr_t *p = &freeNodes[i];
-    uptr s = atomic_load(p, memory_order_relaxed);
-    unlock(p, (Node *)(s & ~1UL));
-  }
-
-  UnlockAll();
-}
-
-template <class Node, int kReservedBits, int kTabSizeLog>
-Node *StackDepotBase<Node, kReservedBits, kTabSizeLog>::alloc(uptr part,
-                                                              uptr memsz) {
-  atomic_uintptr_t *p = &freeNodes[part];
-  Node *head = lock(p);
-  if (head) {
-    unlock(p, head->link);
-    return head;
-  }
-  unlock(p, head);
-  Node *s = (Node *)PersistentAlloc(memsz);
-  stats.allocated += memsz;
-  return s;
-}
-
 template <class Node, int kReservedBits, int kTabSizeLog>
 typename StackDepotBase<Node, kReservedBits, kTabSizeLog>::handle_type
 StackDepotBase<Node, kReservedBits, kTabSizeLog>::Put(args_type args,
@@ -180,7 +125,8 @@ StackDepotBase<Node, kReservedBits, kTabSizeLog>::Put(args_type args,
   CHECK_NE(id, 0);
   CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);
   uptr memsz = Node::storage_size(args);
-  s = alloc(part, memsz);
+  s = (Node *)PersistentAlloc(memsz);
+  stats.allocated += memsz;
   s->id = id;
   s->store(args, h);
   s->link = s2;
@@ -222,7 +168,7 @@ void StackDepotBase<Node, kReservedBits, kTabSizeLog>::LockAll() {
 
 template <class Node, int kReservedBits, int kTabSizeLog>
 void StackDepotBase<Node, kReservedBits, kTabSizeLog>::UnlockAll() {
-  for (int i = kTabSize - 1; i >= 0; --i) {
+  for (int i = 0; i < kTabSize; ++i) {
     atomic_uintptr_t *p = &tab[i];
     uptr s = atomic_load(p, memory_order_relaxed);
     unlock(p, (Node *)(s & ~1UL));

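Aside, not part of the commit: the removed alloc() above leans on the depot's pointer-tagging scheme, in which the low bit of each atomic word doubles as a spinlock; that is also why every traversal masks loaded values with & ~1UL before treating them as pointers. Below is a minimal standalone sketch of the pattern in portable C++; Node, lock, unlock, try_pop, and main here are illustrative stand-ins, not the sanitizer-internal definitions.

#include <atomic>
#include <cstdint>

// Illustrative stand-in for the depot's intrusive list node.
struct Node {
  Node *link;
};

// Spin until the low tag bit is clear, then set it with an acquire CAS.
// The untagged value is the current list head (cf. StackDepotBase::lock).
static Node *lock(std::atomic<std::uintptr_t> *p) {
  for (;;) {
    std::uintptr_t cmp = p->load(std::memory_order_relaxed);
    if ((cmp & 1) == 0 &&
        p->compare_exchange_weak(cmp, cmp | 1, std::memory_order_acquire))
      return reinterpret_cast<Node *>(cmp);
  }
}

// Publish a new head with the tag bit clear, releasing the lock
// (cf. StackDepotBase::unlock).
static void unlock(std::atomic<std::uintptr_t> *p, Node *s) {
  p->store(reinterpret_cast<std::uintptr_t>(s), std::memory_order_release);
}

// Pop one recycled node, or nullptr if the list is empty; this is the fast
// path of the removed alloc(), with the PersistentAlloc fallback elided.
static Node *try_pop(std::atomic<std::uintptr_t> *free_list) {
  Node *head = lock(free_list);
  if (head) {
    unlock(free_list, head->link);  // next node becomes the new head
    return head;
  }
  unlock(free_list, nullptr);  // nothing to reuse; restore the empty list
  return nullptr;
}

int main() {
  std::atomic<std::uintptr_t> free_list{0};
  Node n{nullptr};
  unlock(&free_list, &n);                     // seed the list with one node
  return try_pop(&free_list) == &n ? 0 : 1;   // pops the seeded node
}

The same tag bit explains the reverse-order unlock loops: Free() locks the freeNodes parts in ascending order and releases them in descending order, and the reverted UnlockAll() walked the table in reverse of LockAll() for the same reason.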
--- a/compiler-rt/lib/sanitizer_common/tests/sanitizer_stackdepot_test.cpp
+++ b/compiler-rt/lib/sanitizer_common/tests/sanitizer_stackdepot_test.cpp

@@ -111,38 +111,4 @@ TEST(SanitizerCommon, StackDepotReverseMap) {
   }
 }
 
-TEST(SanitizerCommon, StackDepotFree) {
-  uptr array[] = {1, 2, 3, 4, 5};
-  StackTrace s1(array, ARRAY_SIZE(array));
-  u32 i1 = StackDepotPut(s1);
-  StackTrace stack = StackDepotGet(i1);
-  EXPECT_NE(stack.trace, (uptr*)0);
-  EXPECT_EQ(ARRAY_SIZE(array), stack.size);
-  EXPECT_EQ(0, internal_memcmp(stack.trace, array, sizeof(array)));
-
-  StackDepotStats *stats_before_free = StackDepotGetStats();
-  EXPECT_EQ(1U, stats_before_free->n_uniq_ids);
-  EXPECT_NE(0U, stats_before_free->allocated);
-
-  StackDepotFree();
-
-  StackDepotStats *stats_after_free = StackDepotGetStats();
-  EXPECT_EQ(0U, stats_after_free->n_uniq_ids);
-  EXPECT_EQ(stats_before_free->allocated, stats_after_free->allocated);
-
-  stack = StackDepotGet(i1);
-  EXPECT_EQ((uptr*)0, stack.trace);
-
-  EXPECT_EQ(i1, StackDepotPut(s1));
-
-  StackDepotStats *stats_after_2nd_put = StackDepotGetStats();
-  EXPECT_EQ(1U, stats_after_2nd_put->n_uniq_ids);
-  EXPECT_EQ(stats_after_2nd_put->allocated, stats_after_free->allocated);
-
-  stack = StackDepotGet(i1);
-  EXPECT_NE(stack.trace, (uptr*)0);
-  EXPECT_EQ(ARRAY_SIZE(array), stack.size);
-  EXPECT_EQ(0, internal_memcmp(stack.trace, array, sizeof(array)));
-}
-
 } // namespace __sanitizer
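The deleted test is the clearest statement of the contract being reverted: StackDepotFree() recycled nodes onto per-part free lists instead of returning memory, so allocated stays flat while n_uniq_ids drops to zero, and re-inserting the same trace yields the same id from a reused node. A toy single-threaded model of that contract follows; MiniDepot and all of its members are hypothetical names invented for this sketch, not sanitizer API.

#include <cassert>
#include <cstddef>

struct Node {
  Node *link = nullptr;
};

// Hypothetical single-threaded model of the recycling contract. Nodes are
// deliberately never deleted, mirroring the depot's persistent allocation.
struct MiniDepot {
  Node *free_list = nullptr;
  std::size_t allocated = 0;  // bytes handed out; never decreases

  Node *put() {
    if (Node *n = free_list) {  // fast path: reuse a recycled node
      free_list = n->link;
      return n;
    }
    allocated += sizeof(Node);  // slow path: fresh allocation
    return new Node;
  }

  void free_all(Node *n) {  // "free" just pushes onto the free list
    n->link = free_list;
    free_list = n;
  }
};

int main() {
  MiniDepot depot;
  Node *a = depot.put();
  std::size_t before = depot.allocated;

  depot.free_all(a);
  assert(depot.allocated == before);  // freeing keeps the memory...
  assert(depot.put() == a);           // ...and the next put reuses the node
  return 0;
}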