diff --git a/tcmalloc/guarded_page_allocator.cc b/tcmalloc/guarded_page_allocator.cc
index 6a36dda5b..1aa304a61 100644
--- a/tcmalloc/guarded_page_allocator.cc
+++ b/tcmalloc/guarded_page_allocator.cc
@@ -184,13 +184,16 @@ void GuardedPageAllocator::Print(Printer* out) {
       "Slots Currently Allocated: %zu\n"
       "Slots Currently Quarantined: %zu\n"
       "Maximum Slots Allocated: %zu / %zu\n"
+      "StackTraceFilter Max Slots Used: %zu\n"
+      "StackTraceFilter Replacement Inserts: %zu\n"
       "PARAMETER tcmalloc_guarded_sample_parameter %d\n"
       // TODO(b/263387812): remove when experiment is finished
       "PARAMETER tcmalloc_improved_guarded_sampling %d\n",
       num_allocation_requests_ - num_failed_allocations_,
       num_failed_allocations_, num_alloced_pages_,
       total_pages_ - num_alloced_pages_, num_alloced_pages_max_,
-      max_alloced_pages_, GetChainedRate(),
+      max_alloced_pages_, tc_globals.stacktrace_filter().max_slots_used(),
+      tc_globals.stacktrace_filter().replacement_inserts(), GetChainedRate(),
       Parameters::improved_guarded_sampling());
 }

@@ -204,6 +207,10 @@ void GuardedPageAllocator::PrintInPbtxt(PbtxtRegion* gwp_asan) {
                      total_pages_ - num_alloced_pages_);
   gwp_asan->PrintI64("max_slots_allocated", num_alloced_pages_max_);
   gwp_asan->PrintI64("allocated_slot_limit", max_alloced_pages_);
+  gwp_asan->PrintI64("stack_trace_filter_max_slots_used",
+                     tc_globals.stacktrace_filter().max_slots_used());
+  gwp_asan->PrintI64("stack_trace_filter_replacement_inserts",
+                     tc_globals.stacktrace_filter().replacement_inserts());
   gwp_asan->PrintI64("tcmalloc_guarded_sample_parameter", GetChainedRate());
   // TODO(b/263387812): remove when experiment is finished
   gwp_asan->PrintI64("tcmalloc_improved_guarded_sampling",
diff --git a/tcmalloc/internal/stacktrace_filter.h b/tcmalloc/internal/stacktrace_filter.h
index a3a45f78f..00eea9aff 100644
--- a/tcmalloc/internal/stacktrace_filter.h
+++ b/tcmalloc/internal/stacktrace_filter.h
@@ -44,12 +44,20 @@ class StackTraceFilter {

   size_t Count(const StackTrace& stacktrace) const;
   void Add(const StackTrace& stacktrace);
+  size_t max_slots_used() const {
+    return max_slots_used_.load(std::memory_order_relaxed);
+  }
+  size_t replacement_inserts() const {
+    return replacement_inserts_.load(std::memory_order_relaxed);
+  }

  private:
   constexpr static size_t kMask = 0xFF;
   constexpr static size_t kHashCountLimit = kMask;
   constexpr static int kSize = kMask + 1;
   std::atomic<size_t> stack_hashes_with_count_[kSize]{0};
+  std::atomic<size_t> max_slots_used_{0};
+  std::atomic<size_t> replacement_inserts_{0};

   inline size_t HashOfStackTrace(const StackTrace& stacktrace) const {
     return absl::HashOf(
@@ -100,6 +108,11 @@ inline void StackTraceFilter::Add(const StackTrace& stacktrace) {
     stack_hashes_with_count_[stack_hash % kSize].store(
         (stack_hash & ~kMask) | count, std::memory_order_relaxed);
   } else {
+    if (existing_stack_hash_with_count == 0) {
+      max_slots_used_.fetch_add(1, std::memory_order_relaxed);
+    } else {
+      replacement_inserts_.fetch_add(1, std::memory_order_relaxed);
+    }
     // New stack_hash being placed in (unoccupied entry || existing entry)
     stack_hashes_with_count_[stack_hash % kSize].store(
         (stack_hash & ~kMask) | 1, std::memory_order_relaxed);
diff --git a/tcmalloc/internal/stacktrace_filter_test.cc b/tcmalloc/internal/stacktrace_filter_test.cc
index f012e4948..a474ecc56 100644
--- a/tcmalloc/internal/stacktrace_filter_test.cc
+++ b/tcmalloc/internal/stacktrace_filter_test.cc
@@ -170,13 +170,19 @@ TEST_F(StackTraceFilterTest, CountNew) {
 TEST_F(StackTraceFilterTest, CountDifferent) {
   InitializeColliderStackTrace();
   filter_.Add(stacktrace1_);
+  EXPECT_EQ(1, filter_.max_slots_used());
+  EXPECT_EQ(0, filter_.replacement_inserts());
   EXPECT_EQ(0, filter_.Count(collider_stacktrace_));
 }

 TEST_F(StackTraceFilterTest, Add) {
   filter_.Add(stacktrace1_);
+  EXPECT_EQ(1, filter_.max_slots_used());
+  EXPECT_EQ(0, filter_.replacement_inserts());
   EXPECT_EQ(1, filter_.Count(stacktrace1_));
   filter_.Add(stacktrace1_);
+  EXPECT_EQ(1, filter_.max_slots_used());
+  EXPECT_EQ(0, filter_.replacement_inserts());
   EXPECT_EQ(2, filter_.Count(stacktrace1_));
 }

@@ -184,18 +190,28 @@ TEST_F(StackTraceFilterTest, AddCountLimitReached) {
   while (count(stacktrace1_) < filter_hash_count_limit()) {
     filter_.Add(stacktrace1_);
   }
+  EXPECT_EQ(1, filter_.max_slots_used());
+  EXPECT_EQ(0, filter_.replacement_inserts());
   EXPECT_EQ(filter_hash_count_limit(), filter_.Count(stacktrace1_));
   filter_.Add(stacktrace1_);
+  EXPECT_EQ(1, filter_.max_slots_used());
+  EXPECT_EQ(0, filter_.replacement_inserts());
   EXPECT_EQ(filter_hash_count_limit(), filter_.Count(stacktrace1_));
 }

 TEST_F(StackTraceFilterTest, AddReplace) {
   InitializeColliderStackTrace();
   filter_.Add(stacktrace1_);
+  EXPECT_EQ(1, filter_.max_slots_used());
+  EXPECT_EQ(0, filter_.replacement_inserts());
   EXPECT_EQ(1, filter_.Count(stacktrace1_));
   filter_.Add(stacktrace1_);
+  EXPECT_EQ(1, filter_.max_slots_used());
+  EXPECT_EQ(0, filter_.replacement_inserts());
   EXPECT_EQ(2, filter_.Count(stacktrace1_));
   filter_.Add(collider_stacktrace_);
+  EXPECT_EQ(1, filter_.max_slots_used());
+  EXPECT_EQ(1, filter_.replacement_inserts());
   EXPECT_EQ(0, filter_.Count(stacktrace1_));
   EXPECT_EQ(1, filter_.Count(collider_stacktrace_));
 }
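
Note for reviewers: the patch splits the non-matching branch of Add() into two counted cases, first occupancy of an empty slot versus eviction of a colliding entry. Below is a minimal self-contained sketch of that classification; the MiniFilter scaffolding and main() driver are hypothetical stand-ins, and only the counter/branch logic mirrors the patch. It compiles and runs as shown.

#include <atomic>
#include <cstddef>
#include <cstdio>

// Hypothetical stand-in for StackTraceFilter: each slot packs a stack hash
// (upper bits) and a saturating use count (low 8 bits, kMask).
class MiniFilter {
 public:
  void Add(size_t stack_hash) {
    size_t existing =
        slots_[stack_hash % kSize].load(std::memory_order_relaxed);
    if (existing != 0 && (existing & ~kMask) == (stack_hash & ~kMask)) {
      // Same hash already resident: bump the count, saturating at the limit.
      size_t count = existing & kMask;
      if (count < kHashCountLimit) ++count;
      slots_[stack_hash % kSize].store((stack_hash & ~kMask) | count,
                                       std::memory_order_relaxed);
    } else {
      if (existing == 0) {
        // First occupancy of this slot.
        max_slots_used_.fetch_add(1, std::memory_order_relaxed);
      } else {
        // Slot held a different hash: this insert evicts it.
        replacement_inserts_.fetch_add(1, std::memory_order_relaxed);
      }
      slots_[stack_hash % kSize].store((stack_hash & ~kMask) | 1,
                                       std::memory_order_relaxed);
    }
  }
  size_t max_slots_used() const {
    return max_slots_used_.load(std::memory_order_relaxed);
  }
  size_t replacement_inserts() const {
    return replacement_inserts_.load(std::memory_order_relaxed);
  }

 private:
  constexpr static size_t kMask = 0xFF;
  constexpr static size_t kHashCountLimit = kMask;
  constexpr static int kSize = kMask + 1;
  std::atomic<size_t> slots_[kSize]{};
  std::atomic<size_t> max_slots_used_{0};
  std::atomic<size_t> replacement_inserts_{0};
};

int main() {
  MiniFilter f;
  f.Add(0x100);  // empty slot 0x00      -> max_slots_used == 1
  f.Add(0x100);  // same hash, same slot -> counters unchanged, count bumped
  f.Add(0x200);  // slot 0x00, new hash  -> replacement_inserts == 1
  std::printf("max_slots_used=%zu replacement_inserts=%zu\n",
              f.max_slots_used(), f.replacement_inserts());  // prints 1 and 1
}

Since slots are never reset back to zero in the code shown here, max_slots_used_ behaves as a high-water mark of filter occupancy, while replacement_inserts_ approximates the rate of hash-collision evictions, which is what the new Print/PrintInPbtxt fields expose.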