From 530044ceb273dea529323c8debead860524021ec Mon Sep 17 00:00:00 2001
From: Chris Kennelly <ckennelly@google.com>
Date: Wed, 11 Dec 2024 15:23:07 -0800
Subject: [PATCH] Deflake residency test.

Address Sanitizer may place a mapping directly after ours (even with
MAP_FIXED), so we address this by extending the padding of the mapping.

We add mlock as well to avoid any potential page swapping.

PiperOrigin-RevId: 705260754
Change-Id: I252316efeef47d2e65d0e22a8a780b5cccb9be8d
---
 tcmalloc/internal/residency.h       |  7 ++++
 tcmalloc/internal/residency_test.cc | 57 +++++++++++++++++------------
 2 files changed, 41 insertions(+), 23 deletions(-)

diff --git a/tcmalloc/internal/residency.h b/tcmalloc/internal/residency.h
index 49f7d12b6..691b1dd93 100644
--- a/tcmalloc/internal/residency.h
+++ b/tcmalloc/internal/residency.h
@@ -20,6 +20,7 @@
 #include <stdint.h>
 
 #include <optional>
+#include <ostream>
 
 #include "absl/status/status.h"
 #include "tcmalloc/internal/config.h"
@@ -82,6 +83,12 @@
   const int fd_;
 };
 
+inline std::ostream& operator<<(std::ostream& stream,
+                                const Residency::Info& rhs) {
+  return stream << "{.resident = " << rhs.bytes_resident
+                << ", .swapped = " << rhs.bytes_swapped << "}";
+}
+
 }  // namespace tcmalloc_internal
 }  // namespace tcmalloc
 GOOGLE_MALLOC_SECTION_END
diff --git a/tcmalloc/internal/residency_test.cc b/tcmalloc/internal/residency_test.cc
index 5603a0159..046d2a552 100644
--- a/tcmalloc/internal/residency_test.cc
+++ b/tcmalloc/internal/residency_test.cc
@@ -54,55 +54,66 @@ TEST(ResidenceTest, ThisProcess) {
   const size_t kPageSize = GetPageSize();
   const int kNumPages = 16;
 
-#ifdef ABSL_HAVE_THREAD_SANITIZER
-  // TSAN completely ignores hints unless you ask really nicely.
-  int base = MAP_FIXED;
-
-  // Minimize the chance of a race between munmap and a new mmap.
-  void* const mmap_hint = reinterpret_cast<void*>(0x000DEAD0000);
-#else
-  // ASAN, among others, needs a different memory mapping.
-  void* const mmap_hint = reinterpret_cast<void*>(0x00007BADDEAD0000);
-
-  int base = 0;
-#endif
   // Try both private and shared mappings to make sure we have the bit order of
   // /proc/pid/pageflags correct.
-  for (const int flags : {base | MAP_ANONYMOUS | MAP_SHARED,
-                          base | MAP_ANONYMOUS | MAP_PRIVATE}) {
+  for (const int flags :
+       {MAP_ANONYMOUS | MAP_SHARED, MAP_ANONYMOUS | MAP_PRIVATE}) {
+    const size_t kHead = kPageSize * 10;
+    const size_t kTail = kPageSize * 10;
+
     Residency r;
     // Overallocate kNumPages of memory, so we can munmap the page before and
     // after it.
-    void* p = mmap(mmap_hint, (kNumPages + 2) * kPageSize,
+    void* p = mmap(nullptr, kNumPages * kPageSize + kHead + kTail,
                    PROT_READ | PROT_WRITE, flags, -1, 0);
     ASSERT_NE(p, MAP_FAILED) << errno;
+
     EXPECT_THAT(r.Get(p, (kNumPages + 2) * kPageSize),
                 Optional(FieldsAre(0, 0)));
-    if (p != mmap_hint) {
-      absl::FPrintF(stderr,
-                    "failed to move test mapping out of the way; we might fail "
-                    "due to race\n");
-    }
     ASSERT_EQ(munmap(p, kPageSize), 0);
-    void* q = reinterpret_cast<char*>(p) + kPageSize;
-    void* last = reinterpret_cast<char*>(p) + (kNumPages + 1) * kPageSize;
+    void* q = reinterpret_cast<char*>(p) + kHead;
+    void* last = reinterpret_cast<char*>(p) + kNumPages * kPageSize + kHead;
     ASSERT_EQ(munmap(last, kPageSize), 0);
 
+    EXPECT_THAT(r.Get(p, kHead), Optional(FieldsAre(0, 0)));
+    EXPECT_THAT(r.Get(last, kTail), Optional(FieldsAre(0, 0)));
+
     memset(q, 0, kNumPages * kPageSize);
+    (void)mlock(q, kNumPages * kPageSize);
     ::benchmark::DoNotOptimize(q);
 
+    EXPECT_THAT(r.Get(p, kHead), Optional(FieldsAre(0, 0)));
+    EXPECT_THAT(r.Get(last, kTail), Optional(FieldsAre(0, 0)));
+
     EXPECT_THAT(r.Get(q, kPageSize), Optional(FieldsAre(kPageSize, 0)));
 
-    EXPECT_THAT(r.Get(p, (kNumPages + 2) * kPageSize),
+    EXPECT_THAT(r.Get(q, (kNumPages + 2) * kPageSize),
                 Optional(FieldsAre(kPageSize * kNumPages, 0)));
 
+    EXPECT_THAT(r.Get(reinterpret_cast<char*>(q) + 7, kPageSize - 7),
+                Optional(FieldsAre(kPageSize - 7, 0)));
+
+    EXPECT_THAT(r.Get(reinterpret_cast<char*>(q) + 7, kPageSize),
+                Optional(FieldsAre(kPageSize, 0)));
+
     EXPECT_THAT(r.Get(reinterpret_cast<char*>(q) + 7, 3 * kPageSize),
                 Optional(FieldsAre(kPageSize * 3, 0)));
 
+    EXPECT_THAT(r.Get(reinterpret_cast<char*>(q) + 7, kNumPages * kPageSize),
+                Optional(FieldsAre(kPageSize * kNumPages - 7, 0)));
+
+    EXPECT_THAT(
+        r.Get(reinterpret_cast<char*>(q) + 7, kNumPages * kPageSize - 7),
+        Optional(FieldsAre(kPageSize * kNumPages - 7, 0)));
+
     EXPECT_THAT(
         r.Get(reinterpret_cast<char*>(q) + 7, (kNumPages + 1) * kPageSize),
         Optional(FieldsAre(kPageSize * kNumPages - 7, 0)));
 
+    EXPECT_THAT(
+        r.Get(reinterpret_cast<char*>(q) + 7, (kNumPages + 1) * kPageSize - 7),
+        Optional(FieldsAre(kPageSize * kNumPages - 7, 0)));
+
     ASSERT_EQ(munmap(q, kNumPages * kPageSize), 0);
   }
 }
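
Note: below is a minimal, standalone sketch of the deflaking pattern this patch adopts; it is not part of the patch itself. The names kHead/kTail mirror the test, and the residency-inspection step is left abstract. The idea: pad the mapping with head and tail guard regions so that a sanitizer placing a neighboring mapping cannot perturb the measured range, let the kernel choose the address instead of relying on a fragile mmap hint, and mlock the pages under test so they cannot be swapped out.

#include <sys/mman.h>
#include <unistd.h>

#include <cassert>
#include <cstring>

int main() {
  const size_t kPageSize = sysconf(_SC_PAGESIZE);
  const size_t kNumPages = 16;
  const size_t kHead = kPageSize * 10;  // guard padding before the test pages
  const size_t kTail = kPageSize * 10;  // guard padding after the test pages

  // One mapping holds padding + payload; passing nullptr lets the kernel
  // pick the address, so no mmap hint (and no MAP_FIXED race) is needed.
  void* p = mmap(nullptr, kNumPages * kPageSize + kHead + kTail,
                 PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  assert(p != MAP_FAILED);

  // The pages under test start kHead bytes into the mapping.
  char* q = static_cast<char*>(p) + kHead;
  memset(q, 0, kNumPages * kPageSize);    // fault the pages in
  (void)mlock(q, kNumPages * kPageSize);  // best effort: pin against swapping

  // ... inspect residency of [q, q + kNumPages * kPageSize) here ...

  munmap(p, kNumPages * kPageSize + kHead + kTail);
  return 0;
}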
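And a small sketch of what the new operator<< in residency.h buys; again, not part of the patch. The Info struct below is a stand-in using the field names the patch shows (bytes_resident, bytes_swapped). GoogleTest matchers such as Optional(FieldsAre(...)) print a mismatched value via operator<< when one is available, so failures read as "{.resident = ..., .swapped = ...}" instead of an opaque raw-byte dump.

#include <cstddef>
#include <iostream>

// Stand-in for tcmalloc's Residency::Info; field names follow the patch.
struct Info {
  std::size_t bytes_resident = 0;
  std::size_t bytes_swapped = 0;
};

// Same shape as the overload the patch adds to residency.h.
inline std::ostream& operator<<(std::ostream& stream, const Info& rhs) {
  return stream << "{.resident = " << rhs.bytes_resident
                << ", .swapped = " << rhs.bytes_swapped << "}";
}

int main() {
  Info info{4096, 0};  // one resident page, nothing swapped
  std::cout << info << "\n";  // prints: {.resident = 4096, .swapped = 0}
  return 0;
}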