Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Batch up scheduling of deferred deletions #3030

Merged
merged 10 commits into the target branch from the feature branch
Dec 13, 2024
3 changes: 3 additions & 0 deletions src/mbgl/renderer/tile_pyramid.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -82,6 +82,7 @@ void TilePyramid::update(const std::vector<Immutable<style::LayerProperties>>& l

tiles.clear();
renderedTiles.clear();
cache.deferPendingReleases();

return;
}
Expand Down Expand Up @@ -279,6 +280,8 @@ void TilePyramid::update(const std::vector<Immutable<style::LayerProperties>>& l
tile.usedByRenderedLayers |= tile.layerPropertiesUpdated(layerProperties);
}
}

cache.deferPendingReleases();
}

void TilePyramid::handleWrapJump(float lng) {
Expand Down
56 changes: 45 additions & 11 deletions src/mbgl/tile/tile_cache.cpp
Original file line number Diff line number Diff line change
@@ -1,10 +1,22 @@
#include <mbgl/tile/tile_cache.hpp>

#include <mbgl/actor/scheduler.hpp>
#include <mbgl/util/instrumentation.hpp>

#include <cassert>

namespace mbgl {

/// Destructor: releases all owned tiles, then blocks until every deferred
/// deletion task scheduled on the thread pool has finished, so no background
/// task can touch this object after destruction.
TileCache::~TileCache() {
    MLN_TRACE_FUNC();

    // Drop cached tiles and any tiles still queued for deferred release.
    clear();
    pendingReleases.clear();

    // Wait for outstanding cleanup tasks; each one decrements the counter
    // and notifies under the same lock.
    std::unique_lock<std::mutex> counterLock(deferredSignalLock);
    while (deferredDeletionsPending != 0) {
        deferredSignal.wait(counterLock);
    }
}

void TileCache::setSize(size_t size_) {
MLN_TRACE_FUNC();

Expand All @@ -26,23 +38,46 @@ void TileCache::setSize(size_t size_) {
}

namespace {

/// This exists solely to prevent a problem where temporary lambda captures
/// are retained for the duration of the scope instead of being destroyed immediately.
template <typename T>
struct CaptureWrapper {
CaptureWrapper(std::unique_ptr<T>&& item_)
: item(std::move(item_)) {}
CaptureWrapper(std::vector<std::unique_ptr<Tile>>&& items_)
: items(items_.size()) {
std::ranges::move(items_, items.begin());
}
CaptureWrapper(CaptureWrapper&&) = default;

/// This copy constructor is required to build, but doesn't seem to be called.
CaptureWrapper(const CaptureWrapper& other)
: item(other.item) {}
std::shared_ptr<T> item;
: items(other.items) {}

std::vector<std::shared_ptr<Tile>> items;
};
} // namespace

/// Set aside a tile to be destroyed later, without blocking.
/// Cancels any in-flight work on the tile first; actual destruction happens
/// on a worker thread once `deferPendingReleases()` is called.
void TileCache::deferredRelease(std::unique_ptr<Tile>&& tile) {
    MLN_TRACE_FUNC();

    tile->cancel();
    pendingReleases.emplace_back(std::move(tile));
}

void TileCache::deferPendingReleases() {
MLN_TRACE_FUNC();

constexpr std::size_t scheduleThreshold = 1;
if (pendingReleases.size() < scheduleThreshold) {
return;
}

// Block destruction until the cleanup task is complete
{
std::lock_guard counterLock{deferredSignalLock};
deferredDeletionsPending++;
}

CaptureWrapper wrap{std::move(pendingReleases)};
pendingReleases.clear();

// The `std::function` must be created in a separate statement from the `schedule` call.
// Creating a `std::function` from a lambda involves a copy, which is why we must use
Expand All @@ -51,17 +86,16 @@ void TileCache::deferredRelease(std::unique_ptr<Tile>&& tile) {
// If this temporary outlives the `schedule` call, and the function is executed immediately
// by a waiting thread and is already complete, that temporary reference ends up being the
// last one and the destruction actually occurs here on this thread.
std::function<void()> func{[tile_{CaptureWrapper<Tile>{std::move(tile)}}, this]() mutable {
tile_.item = {};
std::function<void()> func{[tile_{CaptureWrapper{std::move(wrap)}}, this]() mutable {
MLN_TRACE_ZONE(deferPendingReleases lambda);
MLN_ZONE_VALUE(wrap_.releases.size());
tile_.items.clear();

std::lock_guard<std::mutex> counterLock(deferredSignalLock);
deferredDeletionsPending--;
deferredSignal.notify_all();
}};

std::unique_lock<std::mutex> counterLock(deferredSignalLock);
deferredDeletionsPending++;

threadPool.schedule(std::move(func));
}

Expand Down
16 changes: 6 additions & 10 deletions src/mbgl/tile/tile_cache.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -18,15 +18,7 @@ class TileCache {
TileCache(const TaggedScheduler& threadPool_, size_t size_ = 0)
: threadPool(threadPool_),
size(size_) {}

~TileCache() {
clear();

std::unique_lock<std::mutex> counterLock(deferredSignalLock);
while (deferredDeletionsPending != 0) {
deferredSignal.wait(counterLock);
}
}
~TileCache();

/// Change the maximum size of the cache.
void setSize(size_t);
Expand All @@ -43,13 +35,17 @@ class TileCache {
bool has(const OverscaledTileID& key);
void clear();

/// Destroy a tile without blocking
/// Set aside a tile to be destroyed later, without blocking
void deferredRelease(std::unique_ptr<Tile>&&);

/// Schedule any accumulated deferred tiles to be destroyed
void deferPendingReleases();

private:
std::map<OverscaledTileID, std::unique_ptr<Tile>> tiles;
std::list<OverscaledTileID> orderedKeys;
TaggedScheduler threadPool;
std::vector<std::unique_ptr<Tile>> pendingReleases;
size_t deferredDeletionsPending{0};
std::mutex deferredSignalLock;
std::condition_variable deferredSignal;
Expand Down
2 changes: 1 addition & 1 deletion src/mbgl/vulkan/context.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -596,7 +596,7 @@ void Context::buildUniformDescriptorSetLayout(vk::UniqueDescriptorSetLayout& lay

for (size_t i = 0; i < uniformCount; ++i) {
bindings.push_back(vk::DescriptorSetLayoutBinding()
.setBinding(i)
.setBinding(static_cast<uint32_t>(i))
.setStageFlags(stageFlags)
.setDescriptorType(vk::DescriptorType::eUniformBuffer)
.setDescriptorCount(1));
Expand Down
12 changes: 7 additions & 5 deletions src/mbgl/vulkan/descriptor_set.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ void DescriptorSet::createDescriptorPool(DescriptorPoolGrowable& growablePool) {
const auto descriptorPoolInfo = vk::DescriptorPoolCreateInfo(poolFlags).setPoolSizes(size).setMaxSets(maxSets);

growablePool.pools.emplace_back(device->createDescriptorPoolUnique(descriptorPoolInfo), maxSets);
growablePool.currentPoolIndex = growablePool.pools.size() - 1;
growablePool.currentPoolIndex = static_cast<int32_t>(growablePool.pools.size() - 1);
};

void DescriptorSet::allocate() {
Expand All @@ -73,7 +73,8 @@ void DescriptorSet::allocate() {
growablePool.pools.begin(), growablePool.pools.end(), [&](const auto& p) { return !p.unusedSets.empty(); });

if (unusedPoolIt != growablePool.pools.end()) {
growablePool.currentPoolIndex = std::distance(growablePool.pools.begin(), unusedPoolIt);
growablePool.currentPoolIndex = static_cast<int32_t>(
std::distance(growablePool.pools.begin(), unusedPoolIt));
} else
#endif
{
Expand All @@ -83,7 +84,8 @@ void DescriptorSet::allocate() {
[&](const auto& p) { return p.remainingSets >= layouts.size(); });

if (freePoolIt != growablePool.pools.end()) {
growablePool.currentPoolIndex = std::distance(growablePool.pools.begin(), freePoolIt);
growablePool.currentPoolIndex = static_cast<int32_t>(
std::distance(growablePool.pools.begin(), freePoolIt));
} else {
createDescriptorPool(growablePool);
}
Expand Down Expand Up @@ -161,7 +163,7 @@ void UniformDescriptorSet::update(const gfx::UniformBufferArray& uniforms,
.setBufferInfo(descriptorBufferInfo)
.setDescriptorCount(1)
.setDescriptorType(vk::DescriptorType::eUniformBuffer)
.setDstBinding(index)
.setDstBinding(static_cast<uint32_t>(index))
.setDstSet(descriptorSets[frameIndex]);

device->updateDescriptorSets(writeDescriptorSet, nullptr);
Expand Down Expand Up @@ -197,7 +199,7 @@ void ImageDescriptorSet::update(const std::array<gfx::Texture2DPtr, shaders::max
.setImageInfo(descriptorImageInfo)
.setDescriptorCount(1)
.setDescriptorType(vk::DescriptorType::eCombinedImageSampler)
.setDstBinding(id)
.setDstBinding(static_cast<uint32_t>(id))
.setDstSet(descriptorSets[frameIndex]);

device->updateDescriptorSets(writeDescriptorSet, nullptr);
Expand Down
Loading