Avoid taking pageheap_lock for span allocations.
span_allocator is internally thread-safe, so we do not need to hold
pageheap_lock to guard it.

PiperOrigin-RevId: 704302332
Change-Id: I11f91ad80aa0349587bede7c7c6cfc291f7359f2
ckennelly authored and copybara-github committed Dec 9, 2024
1 parent c6141bb commit 405a774
Showing 4 changed files with 96 additions and 32 deletions.
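At a high level, the change splits page allocation into two phases: the bookkeeping that genuinely needs pageheap_lock still runs under it and now returns only a page range plus a donated flag (FinalizeType), while Span creation moves into a new Spanify() step that runs after the lock is released, which is safe because the span allocator is internally thread-safe. The sketch below illustrates that split with simplified stand-in types and names (Range, Span, FinalizeType, page_heap_lock, and the helpers here are placeholders for illustration, not the real tcmalloc code); under TCMALLOC_INTERNAL_LEGACY_LOCKING the old create-the-Span-under-the-lock behavior is retained.

// Sketch only: simplified stand-in types, not the real tcmalloc classes.
#include <cstddef>
#include <cstdint>
#include <mutex>

struct Range {
  std::uintptr_t first_page = 0;  // 0 means "no pages were allocated"
  std::size_t num_pages = 0;
};

struct Span {
  Range r;
  bool donated = false;
};

std::mutex page_heap_lock;  // stand-in for tcmalloc's pageheap_lock

// What LockAndAlloc hands back after this change: a plain descriptor,
// not a Span*.
struct FinalizeType {
  Range r;
  bool donated = false;
};

// Phase 1: bookkeeping under the lock; no Span object is created here.
FinalizeType LockAndAlloc(std::size_t num_pages) {
  std::lock_guard<std::mutex> l(page_heap_lock);
  // The real allocator carves pages out of the filler/region/hugepage cache;
  // here we fake a successful allocation at an arbitrary page number.
  Range r{/*first_page=*/4096, num_pages};
  return FinalizeType{r, /*donated=*/false};
}

// Phase 2 ("Spanify"): create the Span with no page heap lock held, which is
// safe because the span allocator is internally thread-safe.
Span* Spanify(FinalizeType f) {
  if (f.r.first_page == 0) return nullptr;  // allocation failed under the lock
  return new Span{f.r, f.donated};
}

Span* New(std::size_t num_pages) { return Spanify(LockAndAlloc(num_pages)); }

New() composes the two phases, mirroring how the diff's LockAndAlloc()/Spanify() pair is used in HugePageAwareAllocator::New() below.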
113 changes: 84 additions & 29 deletions tcmalloc/huge_page_aware_allocator.h
@@ -106,8 +106,13 @@ class StaticForwarder {
   static void SetHugepage(HugePage p, void* pt);
 
   // SpanAllocator state.
-  static Span* NewSpan(Range r) ABSL_EXCLUSIVE_LOCKS_REQUIRED(pageheap_lock)
-      ABSL_ATTRIBUTE_RETURNS_NONNULL;
+  static Span* NewSpan(Range r)
+#ifdef TCMALLOC_INTERNAL_LEGACY_LOCKING
+      ABSL_EXCLUSIVE_LOCKS_REQUIRED(pageheap_lock)
+#else
+      ABSL_LOCKS_EXCLUDED(pageheap_lock)
+#endif  // TCMALLOC_INTERNAL_LEGACY_LOCKING
+          ABSL_ATTRIBUTE_RETURNS_NONNULL;
   static void DeleteSpan(Span* span)
       ABSL_EXCLUSIVE_LOCKS_REQUIRED(pageheap_lock) ABSL_ATTRIBUTE_NONNULL();
 
@@ -361,19 +366,30 @@ class HugePageAwareAllocator final : public PageAllocatorInterface {
       ABSL_EXCLUSIVE_LOCKS_REQUIRED(pageheap_lock);
   // Helpers for New().
 
-  Span* LockAndAlloc(Length n, SpanAllocInfo span_alloc_info,
-                     bool* from_released);
+#ifdef TCMALLOC_INTERNAL_LEGACY_LOCKING
+  using FinalizeType = Span*;
+#else   // !TCMALLOC_INTERNAL_LEGACY_LOCKING
+  struct FinalizeType {
+    Range r;
+    bool donated = false;
+  };
+#endif  // !TCMALLOC_INTERNAL_LEGACY_LOCKING
+
+  FinalizeType LockAndAlloc(Length n, SpanAllocInfo span_alloc_info,
+                            bool* from_released);
 
-  Span* AllocSmall(Length n, SpanAllocInfo span_alloc_info, bool* from_released)
+  FinalizeType AllocSmall(Length n, SpanAllocInfo span_alloc_info,
+                          bool* from_released)
       ABSL_EXCLUSIVE_LOCKS_REQUIRED(pageheap_lock);
-  Span* AllocLarge(Length n, SpanAllocInfo span_alloc_info, bool* from_released)
+  FinalizeType AllocLarge(Length n, SpanAllocInfo span_alloc_info,
+                          bool* from_released)
       ABSL_EXCLUSIVE_LOCKS_REQUIRED(pageheap_lock);
-  Span* AllocEnormous(Length n, SpanAllocInfo span_alloc_info,
-                      bool* from_released)
+  FinalizeType AllocEnormous(Length n, SpanAllocInfo span_alloc_info,
+                             bool* from_released)
       ABSL_EXCLUSIVE_LOCKS_REQUIRED(pageheap_lock);
 
-  Span* AllocRawHugepages(Length n, SpanAllocInfo span_alloc_info,
-                          bool* from_released)
+  FinalizeType AllocRawHugepages(Length n, SpanAllocInfo span_alloc_info,
+                                 bool* from_released)
       ABSL_EXCLUSIVE_LOCKS_REQUIRED(pageheap_lock);
 
   bool AddRegion() ABSL_EXCLUSIVE_LOCKS_REQUIRED(pageheap_lock);
@@ -385,7 +401,9 @@ class HugePageAwareAllocator final : public PageAllocatorInterface {
       ABSL_EXCLUSIVE_LOCKS_REQUIRED(pageheap_lock);
 
   // Finish an allocation request - give it a span and mark it in the pagemap.
-  Span* Finalize(Range r);
+  FinalizeType Finalize(Range r);
+
+  Span* Spanify(FinalizeType f);
 
   // Whether this HPAA should use subrelease. This delegates to the appropriate
   // parameter depending whether this is for the cold heap or another heap.
@@ -466,23 +484,30 @@ inline PageId HugePageAwareAllocator<Forwarder>::RefillFiller(
 }
 
 template <class Forwarder>
-inline Span* HugePageAwareAllocator<Forwarder>::Finalize(Range r)
+inline typename HugePageAwareAllocator<Forwarder>::FinalizeType
+HugePageAwareAllocator<Forwarder>::Finalize(Range r)
     ABSL_EXCLUSIVE_LOCKS_REQUIRED(pageheap_lock) {
   TC_ASSERT_NE(r.p, PageId{0});
+  info_.RecordAlloc(r);
+  forwarder_.ShrinkToUsageLimit(r.n);
+#ifdef TCMALLOC_INTERNAL_LEGACY_LOCKING
   // TODO(b/175334169): Lift Span creation out of LockAndAlloc.
   Span* ret = forwarder_.NewSpan(r);
   forwarder_.Set(r.p, ret);
   TC_ASSERT(!ret->sampled());
-  info_.RecordAlloc(r);
-  forwarder_.ShrinkToUsageLimit(r.n);
   return ret;
+#else
+  return {r, false};
+#endif
 }
 
 // For anything <= half a huge page, we will unconditionally use the filler
 // to pack it into a single page. If we need another page, that's fine.
 template <class Forwarder>
-inline Span* HugePageAwareAllocator<Forwarder>::AllocSmall(
-    Length n, SpanAllocInfo span_alloc_info, bool* from_released) {
+inline typename HugePageAwareAllocator<Forwarder>::FinalizeType
+HugePageAwareAllocator<Forwarder>::AllocSmall(Length n,
+                                              SpanAllocInfo span_alloc_info,
+                                              bool* from_released) {
   auto [pt, page, released] = filler_.TryGet(n, span_alloc_info);
   *from_released = released;
   if (ABSL_PREDICT_TRUE(pt != nullptr)) {
@@ -491,14 +516,16 @@ inline Span* HugePageAwareAllocator<Forwarder>::AllocSmall(
 
   page = RefillFiller(n, span_alloc_info, from_released);
   if (ABSL_PREDICT_FALSE(page == PageId{0})) {
-    return nullptr;
+    return {};
   }
   return Finalize(Range(page, n));
 }
 
 template <class Forwarder>
-inline Span* HugePageAwareAllocator<Forwarder>::AllocLarge(
-    Length n, SpanAllocInfo span_alloc_info, bool* from_released) {
+inline typename HugePageAwareAllocator<Forwarder>::FinalizeType
+HugePageAwareAllocator<Forwarder>::AllocLarge(Length n,
+                                              SpanAllocInfo span_alloc_info,
+                                              bool* from_released) {
   // If it's an exact page multiple, just pull it from pages directly.
   HugeLength hl = HLFromPages(n);
   if (hl.in_pages() == n) {
@@ -561,18 +588,21 @@ inline Span* HugePageAwareAllocator<Forwarder>::AllocLarge(
 }
 
 template <class Forwarder>
-inline Span* HugePageAwareAllocator<Forwarder>::AllocEnormous(
-    Length n, SpanAllocInfo span_alloc_info, bool* from_released) {
+inline typename HugePageAwareAllocator<Forwarder>::FinalizeType
+HugePageAwareAllocator<Forwarder>::AllocEnormous(Length n,
+                                                 SpanAllocInfo span_alloc_info,
+                                                 bool* from_released) {
   return AllocRawHugepages(n, span_alloc_info, from_released);
 }
 
 template <class Forwarder>
-inline Span* HugePageAwareAllocator<Forwarder>::AllocRawHugepages(
+inline typename HugePageAwareAllocator<Forwarder>::FinalizeType
+HugePageAwareAllocator<Forwarder>::AllocRawHugepages(
     Length n, SpanAllocInfo span_alloc_info, bool* from_released) {
   HugeLength hl = HLFromPages(n);
 
   HugeRange r = cache_.Get(hl, from_released);
-  if (!r.valid()) return nullptr;
+  if (!r.valid()) return {};
 
   // We now have a huge page range that covers our request. There
   // might be some slack in it if n isn't a multiple of
@@ -593,9 +623,14 @@ inline Span* HugePageAwareAllocator<Forwarder>::AllocRawHugepages(
   Length here = kPagesPerHugePage - slack;
   TC_ASSERT_GT(here, Length(0));
   AllocAndContribute(last, here, span_alloc_info, /*donated=*/true);
-  Span* span = Finalize(Range(r.start().first_page(), n));
+  auto span = Finalize(Range(r.start().first_page(), n));
+#ifdef TCMALLOC_INTERNAL_LEGACY_LOCKING
   span->set_donated(/*value=*/true);
   return span;
+#else
+  span.donated = true;
+  return span;
+#endif
 }
 
 inline static void BackSpan(Span* span) {
@@ -608,7 +643,7 @@ inline Span* HugePageAwareAllocator<Forwarder>::New(
     Length n, SpanAllocInfo span_alloc_info) {
   TC_CHECK_GT(n, Length(0));
   bool from_released;
-  Span* s = LockAndAlloc(n, span_alloc_info, &from_released);
+  Span* s = Spanify(LockAndAlloc(n, span_alloc_info, &from_released));
   if (s) {
     // Prefetch for writing, as we anticipate using the memory soon.
     PrefetchW(s->start_address());
@@ -619,8 +654,10 @@ inline Span* HugePageAwareAllocator<Forwarder>::New(
 }
 
 template <class Forwarder>
-inline Span* HugePageAwareAllocator<Forwarder>::LockAndAlloc(
-    Length n, SpanAllocInfo span_alloc_info, bool* from_released) {
+inline typename HugePageAwareAllocator<Forwarder>::FinalizeType
+HugePageAwareAllocator<Forwarder>::LockAndAlloc(Length n,
+                                                SpanAllocInfo span_alloc_info,
+                                                bool* from_released) {
   PageHeapSpinLockHolder l;
   // Our policy depends on size. For small things, we will pack them
   // into single hugepages.
@@ -651,16 +688,34 @@ inline Span* HugePageAwareAllocator<Forwarder>::NewAligned(
   // TODO(b/134690769): support higher align.
   TC_CHECK_LE(align, kPagesPerHugePage);
   bool from_released;
-  Span* s;
+  FinalizeType f;
   {
     PageHeapSpinLockHolder l;
-    s = AllocRawHugepages(n, span_alloc_info, &from_released);
+    f = AllocRawHugepages(n, span_alloc_info, &from_released);
   }
+  Span* s = Spanify(f);
   if (s && from_released) BackSpan(s);
   TC_ASSERT(!s || GetMemoryTag(s->start_address()) == tag_);
   return s;
 }
 
+template <class Forwarder>
+inline Span* HugePageAwareAllocator<Forwarder>::Spanify(FinalizeType f) {
+#ifdef TCMALLOC_INTERNAL_LEGACY_LOCKING
+  return f;
+#else
+  if (ABSL_PREDICT_FALSE(f.r.p == PageId{0})) {
+    return nullptr;
+  }
+
+  Span* s = forwarder_.NewSpan(f.r);
+  forwarder_.Set(f.r.p, s);
+  TC_ASSERT(!s->sampled());
+  s->set_donated(f.donated);
+  return s;
+#endif
+}
+
 template <class Forwarder>
 inline void HugePageAwareAllocator<Forwarder>::DeleteFromHugepage(
     FillerType::Tracker* pt, Range r, bool might_abandon) {
7 changes: 5 additions & 2 deletions tcmalloc/mock_huge_page_static_forwarder.h
@@ -126,8 +126,11 @@ class FakeStaticForwarder {
   void SetHugepage(HugePage p, void* pt) { trackers_[p] = pt; }
 
   // SpanAllocator state.
-  [[nodiscard]] Span* NewSpan(Range r) ABSL_EXCLUSIVE_LOCKS_REQUIRED(
-      pageheap_lock) ABSL_ATTRIBUTE_RETURNS_NONNULL {
+  [[nodiscard]] Span* NewSpan(Range r)
+#ifdef TCMALLOC_INTERNAL_LEGACY_LOCKING
+      ABSL_EXCLUSIVE_LOCKS_REQUIRED(pageheap_lock)
+#endif  // TCMALLOC_INTERNAL_LEGACY_LOCKING
+          ABSL_ATTRIBUTE_RETURNS_NONNULL {
     Span* span;
     void* result = absl::base_internal::LowLevelAlloc::AllocWithArena(
         sizeof(*span) + alignof(Span) + sizeof(void*), ll_arena());
6 changes: 5 additions & 1 deletion tcmalloc/span.h
@@ -102,7 +102,11 @@ class Span final : public SpanList::Elem {
 
   // Allocator/deallocator for spans. Note that these functions are defined
   // in static_vars.h, which is weird: see there for why.
-  static Span* New(Range r);
+  static Span* New(Range r)
+#ifndef TCMALLOC_INTERNAL_LEGACY_LOCKING
+      ABSL_LOCKS_EXCLUDED(pageheap_lock)
+#endif
+          ;
   static void Delete(Span* span);
 
   static void operator delete(void*) = delete;
2 changes: 2 additions & 0 deletions tcmalloc/span_test.cc
@@ -256,7 +256,9 @@ TEST(SpanAllocatorTest, Alignment) {
   spans.reserve(kNumSpans);
 
   {
+#ifdef TCMALLOC_INTERNAL_LEGACY_LOCKING
     PageHeapSpinLockHolder l;
+#endif  // TCMALLOC_INTERNAL_LEGACY_LOCKING
     for (int i = 0; i < kNumSpans; ++i) {
       spans.push_back(Span::New(r));
}
Expand Down
