From a40d2c7606ce49a49f7f8299fa35167e142e3259 Mon Sep 17 00:00:00 2001
From: Nilay Vaish
Date: Tue, 13 Aug 2024 15:11:05 -0700
Subject: [PATCH] Remove the unified alloc feature from the HugePageFiller

PiperOrigin-RevId: 662669066
Change-Id: I6aa7b178b717f8eec658fb27ad57ca0e96fb41f4
---
 tcmalloc/global_stats.cc                   |   7 --
 tcmalloc/huge_page_aware_allocator.h       |   7 +-
 tcmalloc/huge_page_aware_allocator_fuzz.cc |   9 +-
 tcmalloc/huge_page_filler.h                |  30 ++---
 tcmalloc/huge_page_filler_fuzz.cc          |   8 +-
 tcmalloc/huge_page_filler_test.cc          | 128 +++++++--------------
 tcmalloc/parameters.cc                     |   4 -
 tcmalloc/parameters.h                      |   2 -
 8 files changed, 51 insertions(+), 144 deletions(-)

diff --git a/tcmalloc/global_stats.cc b/tcmalloc/global_stats.cc
index 81b9fb52a..ded0edf0d 100644
--- a/tcmalloc/global_stats.cc
+++ b/tcmalloc/global_stats.cc
@@ -550,10 +550,6 @@ void DumpStats(Printer* out, int level) {
               Parameters::huge_region_demand_based_release() ? 1 : 0);
   out->printf("PARAMETER tcmalloc_release_pages_from_huge_region %d\n",
               Parameters::release_pages_from_huge_region() ? 1 : 0);
-  out->printf(
-      "PARAMETER tcmalloc_separate_allocs_for_few_and_many_objects_spans "
-      "%d\n",
-      Parameters::separate_allocs_for_few_and_many_objects_spans());
   out->printf("PARAMETER tcmalloc_use_wider_slabs %d\n",
               tc_globals.cpu_cache().UseWiderSlabs() ? 1 : 0);
   out->printf("PARAMETER tcmalloc_configure_size_class_max_capacity %d\n",
@@ -756,9 +752,6 @@ void DumpStatsInPbtxt(Printer* out, int level) {
                   Parameters::profile_sampling_interval());
   region.PrintRaw("percpu_vcpu_type",
                   PerCpuTypeString(subtle::percpu::GetRseqVcpuMode()));
-  region.PrintBool(
-      "separate_allocs_for_few_and_many_objects_spans",
-      Parameters::separate_allocs_for_few_and_many_objects_spans());
   region.PrintBool("tcmalloc_use_wider_slabs",
                    tc_globals.cpu_cache().UseWiderSlabs());
   region.PrintBool("tcmalloc_configure_size_class_max_capacity",
diff --git a/tcmalloc/huge_page_aware_allocator.h b/tcmalloc/huge_page_aware_allocator.h
index 5f4ba6be3..25c63271a 100644
--- a/tcmalloc/huge_page_aware_allocator.h
+++ b/tcmalloc/huge_page_aware_allocator.h
@@ -122,10 +122,6 @@ class StaticForwarder {
 struct HugePageAwareAllocatorOptions {
   MemoryTag tag;
   HugeRegionUsageOption use_huge_region_more_often = huge_region_option();
-  HugePageFillerAllocsOption allocs_for_sparse_and_dense_spans =
-      Parameters::separate_allocs_for_few_and_many_objects_spans()
-          ? HugePageFillerAllocsOption::kSeparateAllocs
-          : HugePageFillerAllocsOption::kUnifiedAllocs;
   absl::Duration huge_cache_time = Parameters::huge_cache_release_time();
 };

@@ -400,8 +396,7 @@ inline HugePageAwareAllocator<Forwarder>::HugePageAwareAllocator(
     : PageAllocatorInterface("HugePageAware", options.tag),
       unback_(*this),
       unback_without_lock_(*this),
-      filler_(options.allocs_for_sparse_and_dense_spans, unback_,
-              unback_without_lock_),
+      filler_(unback_, unback_without_lock_),
       regions_(options.use_huge_region_more_often),
       vm_allocator_(*this),
       metadata_allocator_(*this),
diff --git a/tcmalloc/huge_page_aware_allocator_fuzz.cc b/tcmalloc/huge_page_aware_allocator_fuzz.cc
index 40da9fc33..5deeabac8 100644
--- a/tcmalloc/huge_page_aware_allocator_fuzz.cc
+++ b/tcmalloc/huge_page_aware_allocator_fuzz.cc
@@ -85,8 +85,7 @@ void FuzzHPAA(const std::string& s) {
   // [1] - HugeRegionsMode.
   // [2] - HugeCache release time
   // [3:4] - Reserved.
-  // [5] - Determine if we use separate filler allocs based on number of
-  //       objects per span.
+  // [5] - (available)
   // [6:12] - Reserved.
  //
  // TODO(b/271282540): Convert these to strongly typed fuzztest parameters.
@@ -115,11 +114,6 @@ void FuzzHPAA(const std::string& s) {
           ? HugeRegionUsageOption::kDefault
           : HugeRegionUsageOption::kUseForAllLargeAllocs;

-  const HugePageFillerAllocsOption allocs_option =
-      static_cast<uint8_t>(data[5]) >= 128
-          ? HugePageFillerAllocsOption::kUnifiedAllocs
-          : HugePageFillerAllocsOption::kSeparateAllocs;
-
   const int32_t huge_cache_release_s = std::max(data[2], 1);

   // data[6:12] - Reserve additional bytes for any features we might want to add
@@ -134,7 +128,6 @@ void FuzzHPAA(const std::string& s) {
   HugePageAwareAllocatorOptions options;
   options.tag = tag;
   options.use_huge_region_more_often = huge_region_option;
-  options.allocs_for_sparse_and_dense_spans = allocs_option;
   options.huge_cache_time = absl::Seconds(huge_cache_release_s);
   HugePageAwareAllocator* allocator;
   allocator =
diff --git a/tcmalloc/huge_page_filler.h b/tcmalloc/huge_page_filler.h
index 8426ab9b2..fe79daa94 100644
--- a/tcmalloc/huge_page_filler.h
+++ b/tcmalloc/huge_page_filler.h
@@ -262,13 +262,11 @@ template <class TrackerType>
 class HugePageFiller {
  public:
   explicit HugePageFiller(
-      HugePageFillerAllocsOption allocs_option,
       MemoryModifyFunction& unback ABSL_ATTRIBUTE_LIFETIME_BOUND,
       MemoryModifyFunction& unback_without_lock ABSL_ATTRIBUTE_LIFETIME_BOUND);
-  HugePageFiller(Clock clock, HugePageFillerAllocsOption allocs_options,
-                 MemoryModifyFunction& unback ABSL_ATTRIBUTE_LIFETIME_BOUND,
-                 MemoryModifyFunction& unback_without_lock
-                     ABSL_ATTRIBUTE_LIFETIME_BOUND);
+  HugePageFiller(
+      Clock clock, MemoryModifyFunction& unback ABSL_ATTRIBUTE_LIFETIME_BOUND,
+      MemoryModifyFunction& unback_without_lock ABSL_ATTRIBUTE_LIFETIME_BOUND);

   typedef TrackerType Tracker;

@@ -446,8 +444,6 @@ class HugePageFiller {
   // n_used_partial_released_ is the number of pages which have been allocated
   // from the hugepages in the set regular_alloc_partial_released.
   Length n_used_partial_released_[AccessDensityPrediction::kPredictionCounts];
-  const HugePageFillerAllocsOption allocs_for_sparse_and_dense_spans_ =
-      HugePageFillerAllocsOption::kUnifiedAllocs;

   // RemoveFromFillerList pt from the appropriate PageTrackerList.
   void RemoveFromFillerList(TrackerType* pt);
@@ -639,19 +635,17 @@ inline Length PageTracker::free_pages() const {

 template <class TrackerType>
 inline HugePageFiller<TrackerType>::HugePageFiller(
-    HugePageFillerAllocsOption allocs_option, MemoryModifyFunction& unback,
-    MemoryModifyFunction& unback_without_lock)
+    MemoryModifyFunction& unback, MemoryModifyFunction& unback_without_lock)
     : HugePageFiller(Clock{.now = absl::base_internal::CycleClock::Now,
                            .freq = absl::base_internal::CycleClock::Frequency},
-                     allocs_option, unback, unback_without_lock) {}
+                     unback, unback_without_lock) {}

 // For testing with mock clock
 template <class TrackerType>
 inline HugePageFiller<TrackerType>::HugePageFiller(
-    Clock clock, HugePageFillerAllocsOption allocs_option,
-    MemoryModifyFunction& unback, MemoryModifyFunction& unback_without_lock)
-    : allocs_for_sparse_and_dense_spans_(allocs_option),
-      size_(NHugePages(0)),
+    Clock clock, MemoryModifyFunction& unback,
+    MemoryModifyFunction& unback_without_lock)
+    : size_(NHugePages(0)),
       fillerstats_tracker_(clock, absl::Minutes(10), absl::Minutes(5)),
       clock_(clock),
       unback_(unback),
@@ -731,8 +725,6 @@ HugePageFiller<TrackerType>::TryGet(Length n, SpanAllocInfo span_alloc_info) {
   bool was_released = false;
   const AccessDensityPrediction type =
-      ABSL_PREDICT_TRUE(allocs_for_sparse_and_dense_spans_ ==
-                        HugePageFillerAllocsOption::kSeparateAllocs) &&
              IsDenseSpan(span_alloc_info.density)
          ? AccessDensityPrediction::kDense
          : AccessDensityPrediction::kSparse;

@@ -858,8 +850,6 @@ inline void HugePageFiller<TrackerType>::Contribute(
   TC_ASSERT_EQ(pt->released_pages(), Length(0));
   const AccessDensityPrediction type =
-      ABSL_PREDICT_TRUE(allocs_for_sparse_and_dense_spans_ ==
-                        HugePageFillerAllocsOption::kSeparateAllocs) &&
              IsDenseSpan(span_alloc_info.density)
          ? AccessDensityPrediction::kDense
          : AccessDensityPrediction::kSparse;

@@ -1924,8 +1914,6 @@ inline void HugePageFiller<TrackerType>::RemoveFromFillerList(TrackerType* pt) {
   size_t chunk = IndexFor(pt);
   size_t i = ListFor(longest, chunk);
   const AccessDensityPrediction type =
-      ABSL_PREDICT_TRUE(allocs_for_sparse_and_dense_spans_ ==
-                        HugePageFillerAllocsOption::kSeparateAllocs) &&
              pt->HasDenseSpans()
          ? AccessDensityPrediction::kDense
          : AccessDensityPrediction::kSparse;
@@ -1957,8 +1945,6 @@ inline void HugePageFiller<TrackerType>::AddToFillerList(TrackerType* pt) {
   size_t i = ListFor(longest, chunk);

   const AccessDensityPrediction type =
-      ABSL_PREDICT_TRUE(allocs_for_sparse_and_dense_spans_ ==
-                        HugePageFillerAllocsOption::kSeparateAllocs) &&
              pt->HasDenseSpans()
          ? AccessDensityPrediction::kDense
          : AccessDensityPrediction::kSparse;
diff --git a/tcmalloc/huge_page_filler_fuzz.cc b/tcmalloc/huge_page_filler_fuzz.cc
index f3065b39f..0a9423411 100644
--- a/tcmalloc/huge_page_filler_fuzz.cc
+++ b/tcmalloc/huge_page_filler_fuzz.cc
@@ -105,7 +105,7 @@ void FuzzFiller(const std::string& s) {
   // HugePageFiller.
   //
   // [0] - (available)
-  // [1] - We choose the separate_allocs_for_few_and_many_objects_spans
+  // [1] - (available)
   // [2] - (available)
   //
   // Afterwards, we read 5 bytes at a time until the buffer is exhausted.
@@ -116,16 +116,10 @@ void FuzzFiller(const std::string& s) {
   //                 For example, this input can provide a Length to
   //                 allocate, or the index of the previous allocation to
   //                 deallocate.
-
-  const HugePageFillerAllocsOption allocs_for_few_and_many_objects_spans =
-      static_cast<uint8_t>(data[1]) >= 128
-          ? HugePageFillerAllocsOption::kSeparateAllocs
-          : HugePageFillerAllocsOption::kUnifiedAllocs;
   data += kInitBytes;
   size -= kInitBytes;

   HugePageFiller<PageTracker> filler(Clock{.now = mock_clock, .freq = freq},
-                                     allocs_for_few_and_many_objects_spans,
                                      unback, unback);

   struct Alloc {
diff --git a/tcmalloc/huge_page_filler_test.cc b/tcmalloc/huge_page_filler_test.cc
index 13b9123aa..c01eedcde 100644
--- a/tcmalloc/huge_page_filler_test.cc
+++ b/tcmalloc/huge_page_filler_test.cc
@@ -642,8 +642,7 @@ class BlockingUnback final : public MemoryModifyFunction {

 thread_local absl::Mutex* BlockingUnback::mu_ = nullptr;

-class FillerTest
-    : public testing::TestWithParam<std::tuple<HugePageFillerAllocsOption>> {
+class FillerTest : public testing::Test {
  protected:
   // Allow tests to modify the clock used by the cache.
   static int64_t FakeClock() { return clock_; }
@@ -685,8 +684,7 @@ class FillerTest
   FillerTest()
       : filler_(Clock{.now = FakeClock, .freq = GetFakeClockFrequency},
-                /*separate_allocs_for_sparse_and_dense_spans=*/
-                std::get<0>(GetParam()), blocking_unback_, blocking_unback_) {
+                blocking_unback_, blocking_unback_) {
     ResetClock();
     // Reset success state
     blocking_unback_.success_ = true;
@@ -799,10 +797,8 @@ class FillerTest
     EXPECT_LT(n, kPagesPerHugePage);
     // Densely-accessed spans are not allocated from donated hugepages. So
     // assert that we do not test such a situation.
-    EXPECT_TRUE(std::get<0>(GetParam()) ==
-                    HugePageFillerAllocsOption::kUnifiedAllocs ||
-                (!donated ||
-                 span_alloc_info.density == AccessDensityPrediction::kSparse));
+    EXPECT_TRUE(!donated ||
+                span_alloc_info.density == AccessDensityPrediction::kSparse);
     PAlloc ret;
     ret.n = n;
     ret.pt = nullptr;
@@ -853,7 +849,7 @@ class FillerTest

 int64_t FillerTest::clock_{1234};

-TEST_P(FillerTest, Density) {
+TEST_F(FillerTest, Density) {
   absl::BitGen rng;
   // Start with a really annoying setup: some hugepages half empty (randomly)
   std::vector<PAlloc> allocs;
@@ -871,13 +867,8 @@
   for (auto d : doomed_allocs) {
     Delete(d);
   }
-  const HugePageFillerAllocsOption path = std::get<0>(GetParam());
-  if (path == HugePageFillerAllocsOption::kUnifiedAllocs) {
-    EXPECT_EQ(filler_.size(), kNumHugePages);
-  } else {
-    EXPECT_LE(filler_.size(), kNumHugePages + NHugePages(1));
-    EXPECT_GE(filler_.size(), kNumHugePages);
-  }
+  EXPECT_LE(filler_.size(), kNumHugePages + NHugePages(1));
+  EXPECT_GE(filler_.size(), kNumHugePages);
   // We want a good chance of touching ~every allocation.
   size_t n = allocs.size();
   // Now, randomly add and delete to the allocations.
@@ -905,7 +896,7 @@
 // This test makes sure that we continue releasing from regular (non-partial)
 // allocs when we enable a feature to release all free pages from partial
 // allocs.
-TEST_P(FillerTest, ReleaseFromFullAllocs) {
+TEST_F(FillerTest, ReleaseFromFullAllocs) {
   static const Length kAlloc = kPagesPerHugePage / 2;
   // Maintain the object count for the second allocation so that the alloc list
   // remains the same for the two allocations.
@@ -944,7 +935,7 @@
 // even when we request fewer pages to release. It also confirms that we
 // continue to release desired number of pages from the full allocs even when
 // release_partial_alloc_pages option is enabled.
-TEST_P(FillerTest, ReleaseFreePagesInPartialAllocs) {
+TEST_F(FillerTest, ReleaseFreePagesInPartialAllocs) {
   static const Length kAlloc = kPagesPerHugePage / 2;
   static const Length kL1 = kAlloc - Length(1);
   static const Length kL2 = kAlloc + Length(1);
@@ -1004,7 +995,7 @@
   Delete(p3);
 }

-TEST_P(FillerTest, AccountingForUsedPartialReleased) {
+TEST_F(FillerTest, AccountingForUsedPartialReleased) {
   static const Length kAlloc = kPagesPerHugePage / 2;
   static const Length kL1 = kAlloc + Length(3);
   static const Length kL2 = kAlloc + Length(5);
@@ -1030,7 +1021,7 @@
   Delete(p2);
 }

-TEST_P(FillerTest, Release) {
+TEST_F(FillerTest, Release) {
   static const Length kAlloc = kPagesPerHugePage / 2;
   // Maintain the object count for the second allocation so that the alloc list
   // remains the same for the two allocations.
@@ -1063,7 +1054,7 @@
   EXPECT_EQ(filler_.previously_released_huge_pages(), NHugePages(0));
 }

-TEST_P(FillerTest, ReleaseZero) {
+TEST_F(FillerTest, ReleaseZero) {
   // Trying to release no pages should not crash.
   EXPECT_EQ(
       ReleasePages(Length(0),
@@ -1117,9 +1108,9 @@ void FillerTest::FragmentationTest() {
   }
 }

-TEST_P(FillerTest, Fragmentation) { FragmentationTest(); }
+TEST_F(FillerTest, Fragmentation) { FragmentationTest(); }

-TEST_P(FillerTest, PrintFreeRatio) {
+TEST_F(FillerTest, PrintFreeRatio) {
   // This test is sensitive to the number of pages per hugepage, as we are
   // printing raw stats.
   if (kPagesPerHugePage != Length(256)) {
@@ -1185,7 +1176,7 @@ using testing::AnyOf;
 using testing::Eq;
 using testing::StrEq;

-TEST_P(FillerTest, HugePageFrac) {
+TEST_F(FillerTest, HugePageFrac) {
   // I don't actually care which we get, both are
   // reasonable choices, but don't report a NaN/complain
   // about divide by 0s/ give some bogus number for empty.
@@ -1246,7 +1237,7 @@
 //
 // This test is a tool for analyzing parameters -- not intended as an actual
 // unit test.
-TEST_P(FillerTest, DISABLED_ReleaseFrac) {
+TEST_F(FillerTest, DISABLED_ReleaseFrac) {
   absl::BitGen rng;
   const Length baseline = LengthFromBytes(absl::GetFlag(FLAGS_bytes));
   const Length peak = baseline * absl::GetFlag(FLAGS_growth_factor);
@@ -1285,7 +1276,7 @@

 // Make sure we release appropriate number of pages when using
 // ReleasePartialPages.
-TEST_P(FillerTest, ReleasePagesFromPartialAllocs) {
+TEST_F(FillerTest, ReleasePagesFromPartialAllocs) {
   const Length N = kPagesPerHugePage;
   auto big = Allocate(N - Length(2));
   auto tiny1 = AllocateWithSpanAllocInfo(Length(1), big.span_alloc_info);
@@ -1340,7 +1331,7 @@
   EXPECT_EQ(filler_.unmapped_pages(), Length(0));
 }

-TEST_P(FillerTest, ReleaseAccounting) {
+TEST_F(FillerTest, ReleaseAccounting) {
   const Length N = kPagesPerHugePage;
   auto big = Allocate(N - Length(2));
   auto tiny1 = AllocateWithSpanAllocInfo(Length(1), big.span_alloc_info);
@@ -1410,7 +1401,7 @@
   EXPECT_EQ(filler_.unmapped_pages(), Length(0));
 }

-TEST_P(FillerTest, ReleaseWithReuse) {
+TEST_F(FillerTest, ReleaseWithReuse) {
   const Length N = kPagesPerHugePage;
   auto half = Allocate(N / 2);
   auto tiny1 = AllocateWithSpanAllocInfo(N / 4, half.span_alloc_info);
@@ -1454,7 +1445,7 @@
   EXPECT_EQ(filler_.previously_released_huge_pages(), NHugePages(0));
 }

-TEST_P(FillerTest, CheckPreviouslyReleasedStats) {
+TEST_F(FillerTest, CheckPreviouslyReleasedStats) {
   const Length N = kPagesPerHugePage;
   auto half = Allocate(N / 2);
   auto tiny1 = AllocateWithSpanAllocInfo(N / 4, half.span_alloc_info);
@@ -1514,7 +1505,7 @@
   }
 }

-TEST_P(FillerTest, AvoidArbitraryQuarantineVMGrowth) {
+TEST_F(FillerTest, AvoidArbitraryQuarantineVMGrowth) {
   const Length N = kPagesPerHugePage;
   // Guarantee we have a ton of released pages go empty.
   for (int i = 0; i < 10 * 1000; ++i) {
@@ -1529,7 +1520,7 @@
   EXPECT_LE(s.system_bytes, 1024 * 1024 * 1024);
 }

-TEST_P(FillerTest, StronglyPreferNonDonated) {
+TEST_F(FillerTest, StronglyPreferNonDonated) {
   // We donate several huge pages of varying fullnesses. Then we make several
   // allocations that would be perfect fits for the donated hugepages, *after*
   // making one allocation that won't fit, to ensure that a huge page is
@@ -1564,7 +1555,7 @@
   }
 }

-TEST_P(FillerTest, SkipPartialAllocSubrelease) {
+TEST_F(FillerTest, SkipPartialAllocSubrelease) {
   // This test is sensitive to the number of pages per hugepage, as we are
   // printing raw stats.
   if (kPagesPerHugePage != Length(256)) {
@@ -1766,7 +1757,7 @@ HugePageFiller: 50.0000% of decisions confirmed correct, 0 pending (50.0000% of
 )"));
 }

-TEST_P(FillerTest, SkipSubrelease) {
+TEST_F(FillerTest, SkipSubrelease) {
   // This test is sensitive to the number of pages per hugepage, as we are
   // printing raw stats.
   if (kPagesPerHugePage != Length(256)) {
@@ -1964,20 +1955,14 @@ HugePageFiller: 50.0000% of decisions confirmed correct, 0 pending (50.0000% of
 )"));
 }

-TEST_P(FillerTest, LifetimeTelemetryTest) {
+TEST_F(FillerTest, LifetimeTelemetryTest) {
   // This test is sensitive to the number of pages per hugepage, as we are
   // printing raw stats.
   if (kPagesPerHugePage != Length(256)) {
     GTEST_SKIP();
   }

-  // Skip test for single alloc as we test for non-zero hardened output.
-  if (std::get<0>(GetParam()) == HugePageFillerAllocsOption::kUnifiedAllocs) {
-    GTEST_SKIP() << "Skipping test for kUnifiedAllocs";
-  }
-
   const Length N = kPagesPerHugePage;
-
   SpanAllocInfo info_sparsely_accessed = {1, AccessDensityPrediction::kSparse};
   PAlloc small_alloc = AllocateWithSpanAllocInfo(N / 4, info_sparsely_accessed);
   PAlloc large_alloc =
@@ -2139,7 +2124,7 @@ HugePageFiller: <254<= 0 <255<= 0
   Delete(large_alloc);
 }

-TEST_P(FillerTest, SkipSubReleaseDemandPeak) {
+TEST_F(FillerTest, SkipSubReleaseDemandPeak) {
   // Tests that HugePageFiller can cap filler's short-term long-term
   // skip-subrelease mechanism using the demand measured by subrelease
   // intervals.
@@ -2180,7 +2165,7 @@
   Delete(half1c);
 }

-TEST_P(FillerTest, ReportSkipSubreleases) {
+TEST_F(FillerTest, ReportSkipSubreleases) {
   // Tests that HugePageFiller reports skipped subreleases using demand
   // requirement that is the smaller of two (recent peak and its
   // current capacity). This fix makes evaluating skip subrelease more accurate,
@@ -2314,7 +2299,7 @@ std::vector<PAlloc> FillerTest::GenerateInterestingAllocs() {

 // Testing subrelase stats: ensure that the cumulative number of released
 // pages and broken hugepages is no less than those of the last 10 mins
-TEST_P(FillerTest, CheckSubreleaseStats) {
+TEST_F(FillerTest, CheckSubreleaseStats) {
   // Get lots of hugepages into the filler.
   Advance(absl::Minutes(1));
   std::vector<PAlloc> result;
@@ -2410,7 +2395,7 @@
   }
 }

-TEST_P(FillerTest, ConstantBrokenHugePages) {
+TEST_F(FillerTest, ConstantBrokenHugePages) {
   // Get and Fill up many huge pages
   const HugeLength kHugePages = NHugePages(10 * kPagesPerHugePage.raw_num());

@@ -2475,7 +2460,7 @@

 // Confirms that a timeseries that contains every epoch does not exceed the
 // expected buffer capacity of 1 MiB.
-TEST_P(FillerTest, CheckBufferSize) {
+TEST_F(FillerTest, CheckBufferSize) {
   const int kEpochs = 600;
   const absl::Duration kEpochLength = absl::Seconds(1);

@@ -2503,7 +2488,7 @@
   EXPECT_LE(buffer_size, 1024 * 1024);
 }

-TEST_P(FillerTest, ReleasePriority) {
+TEST_F(FillerTest, ReleasePriority) {
   // Fill up many huge pages (>> kPagesPerHugePage). This relies on an
   // implementation detail of ReleasePages buffering up at most
   // kPagesPerHugePage as potential release candidates.
@@ -2604,7 +2589,7 @@
   }
 }

-TEST_P(FillerTest, b258965495) {
+TEST_F(FillerTest, b258965495) {
   // 1 huge page: 2 pages allocated, kPagesPerHugePage-2 free, 0 released
   auto a1 = Allocate(Length(2));
   EXPECT_EQ(filler_.size(), NHugePages(1));
@@ -2634,11 +2619,7 @@
   Delete(a1);
 }

-TEST_P(FillerTest, CheckFillerStats) {
-  // Skip test for single alloc as we test for non-zero hardened output.
-  if (std::get<0>(GetParam()) == HugePageFillerAllocsOption::kUnifiedAllocs) {
-    GTEST_SKIP() << "Skipping test for kUnifiedAllocs";
-  }
+TEST_F(FillerTest, CheckFillerStats) {
   if (kPagesPerHugePage != Length(256)) {
     // The output is hardcoded on this assumption, and dynamically calculating
     // it would be way too much of a pain.
@@ -2698,11 +2679,7 @@

 // Test the output of Print(). This is something of a change-detector test,
 // but that's not all bad in this case.
-TEST_P(FillerTest, Print) {
-  // Skip test for single alloc as we test for non-zero hardened output.
-  if (std::get<0>(GetParam()) == HugePageFillerAllocsOption::kUnifiedAllocs) {
-    GTEST_SKIP() << "Skipping test for kUnifiedAllocs";
-  }
+TEST_F(FillerTest, Print) {
   if (kPagesPerHugePage != Length(256)) {
     // The output is hardcoded on this assumption, and dynamically calculating
     // it would be way too much of a pain.
@@ -3085,13 +3062,7 @@ HugePageFiller: Subrelease stats last 10 min: total 282 pages subreleased (0 pag
 // Test Get and Put operations on the filler work correctly when number of
 // objects are provided. We expect that Get requests with sparsely-accessed
 // and densely-accessed spans are satisfied by their respective allocs.
-TEST_P(FillerTest, GetsAndPuts) {
-  // TODO(b/295252832): remove the skipping part once the two separate allocs
-  // become the only option.
-  if (std::get<0>(GetParam()) == HugePageFillerAllocsOption::kUnifiedAllocs) {
-    GTEST_SKIP() << "Skipping test for kUnifiedAllocs";
-  }
-
+TEST_F(FillerTest, GetsAndPuts) {
   randomize_density_ = false;
   absl::BitGen rng;
   std::vector<PAlloc> sparsely_accessed_allocs;
@@ -3136,13 +3107,7 @@

 // Test that filler tries to release pages from the sparsely-accessed allocs
 // before attempting to release pages from the densely-accessed allocs.
-TEST_P(FillerTest, ReleasePrioritySparseAndDenseAllocs) {
-  // TODO(b/295252832): remove the skipping part once the two separate allocs
-  // become the only option.
-  if (std::get<0>(GetParam()) == HugePageFillerAllocsOption::kUnifiedAllocs) {
-    GTEST_SKIP() << "Skipping test for kUnifiedAllocs";
-  }
-
+TEST_F(FillerTest, ReleasePrioritySparseAndDenseAllocs) {
   randomize_density_ = false;
   const Length N = kPagesPerHugePage;
   const Length kToBeReleased(4);
@@ -3167,7 +3132,7 @@
 // back down by random deletion. Then release partial hugepages until
 // pageheap is bounded by some fraction of usage. Measure the blowup in VSS
 // footprint.
-TEST_P(FillerTest, BoundedVSS) {
+TEST_F(FillerTest, BoundedVSS) {
   randomize_density_ = false;
   absl::BitGen rng;
   const Length baseline = LengthFromBytes(absl::GetFlag(FLAGS_bytes));
@@ -3205,15 +3170,7 @@
 // In b/265337869, we observed failures in the huge_page_filler due to mixing
 // of hugepages between sparsely-accessed and densely-accessed allocs. The test
 // below reproduces the buggy situation.
-TEST_P(FillerTest, CounterUnderflow) {
-  // The test so specifically needs that both the alloc lists are used. So, we
-  // skip when using a single alloc list.
-  // TODO(b/295252832): remove the skipping part once the two separate allocs
-  // become the only option.
-  if (std::get<0>(GetParam()) == HugePageFillerAllocsOption::kUnifiedAllocs) {
-    GTEST_SKIP() << "Skipping test for single alloc";
-  }
-
+TEST_F(FillerTest, CounterUnderflow) {
   randomize_density_ = false;
   const Length N = kPagesPerHugePage;
   const Length kToBeReleased(kPagesPerHugePage / 2 + Length(1));
@@ -3241,7 +3198,7 @@
 // presence of partially released and fully released pages in densely-accessed
 // alloc. The comparator in use does not make correct choices in presence of
 // such hugepages. The test below reproduces the buggy situation.
-TEST_P(FillerTest, ReleasePagesFromDenseAlloc) {
+TEST_F(FillerTest, ReleasePagesFromDenseAlloc) {
   randomize_density_ = false;
   constexpr size_t kCandidatesForReleasingMemory =
       HugePageFiller<PageTracker>::kCandidatesForReleasingMemory;
@@ -3280,7 +3237,7 @@
   }
 }

-TEST_P(FillerTest, ReleasedPagesStatistics) {
+TEST_F(FillerTest, ReleasedPagesStatistics) {
   constexpr Length N = kPagesPerHugePage / 4;

   PAlloc a1 = Allocate(N);
@@ -3320,11 +3277,6 @@
   Delete(a1);
 }

-INSTANTIATE_TEST_SUITE_P(All, FillerTest,
-                         testing::Combine(testing::Values(
-                             HugePageFillerAllocsOption::kUnifiedAllocs,
-                             HugePageFillerAllocsOption::kSeparateAllocs)));
-
 TEST(SkipSubreleaseIntervalsTest, EmptyIsNotEnabled) {
   // When we have a limit hit, we pass SkipSubreleaseIntervals{} to the
   // filler. Make sure it doesn't signal that we should skip the limit.
diff --git a/tcmalloc/parameters.cc b/tcmalloc/parameters.cc
index f6a21d8c9..eca433480 100644
--- a/tcmalloc/parameters.cc
+++ b/tcmalloc/parameters.cc
@@ -296,10 +296,6 @@ bool Parameters::resize_size_class_max_capacity() {
                          std::memory_order_relaxed);
 }

-bool Parameters::separate_allocs_for_few_and_many_objects_spans() {
-  return true;
-}
-
 int32_t Parameters::max_per_cpu_cache_size() {
   return tc_globals.cpu_cache().CacheLimit();
 }
diff --git a/tcmalloc/parameters.h b/tcmalloc/parameters.h
index 6a1dafc86..70df2c2d6 100644
--- a/tcmalloc/parameters.h
+++ b/tcmalloc/parameters.h
@@ -201,8 +201,6 @@ class Parameters {
     TCMalloc_Internal_SetPerCpuCachesDynamicSlabShrinkThreshold(value);
   }

-  static bool separate_allocs_for_few_and_many_objects_spans();
-
  private:
   friend void ::TCMalloc_Internal_SetBackgroundReleaseRate(size_t v);
   friend void ::TCMalloc_Internal_SetGuardedSamplingInterval(int64_t v);
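
Note (illustrative, not part of the commit above): after this patch the HugePageFiller
constructors no longer accept a HugePageFillerAllocsOption, and the separate
sparse/dense alloc lists that used to sit behind
Parameters::separate_allocs_for_few_and_many_objects_spans() become the only behavior.
The sketch below is a rough illustration of the call-site change; it reuses names that
appear in the hunks above (HugePageFiller, PageTracker, Clock, MemoryModifyFunction)
and is not meant to compile outside tcmalloc's internals.

    // Before this patch: callers chose between unified and separate allocs.
    HugePageFiller<PageTracker> filler(clock,
                                       HugePageFillerAllocsOption::kSeparateAllocs,
                                       unback, unback_without_lock);

    // After this patch: the option is gone. Every span is classified purely by
    // its access density, as in TryGet()/Contribute() above:
    //   IsDenseSpan(span_alloc_info.density) ? AccessDensityPrediction::kDense
    //                                        : AccessDensityPrediction::kSparse
    HugePageFiller<PageTracker> filler(clock, unback, unback_without_lock);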