From dcd47ef7b00b2c16a9c51a470725439cc6ab9294 Mon Sep 17 00:00:00 2001 From: Denys Makoviichuk Date: Sun, 1 Sep 2024 15:50:23 -0700 Subject: [PATCH] [Bugfix] Fixed is allocated (#12109) fixed is_allocated removed new include --- ttnn/cpp/ttnn/tensor/tensor.cpp | 27 ++--------------------- ttnn/cpp/ttnn/tensor/types.hpp | 38 ++++++++++++++++++++++++++++++--- 2 files changed, 37 insertions(+), 28 deletions(-) diff --git a/ttnn/cpp/ttnn/tensor/tensor.cpp b/ttnn/cpp/ttnn/tensor/tensor.cpp index 7a1cb08459c..96e0fe28fd6 100644 --- a/ttnn/cpp/ttnn/tensor/tensor.cpp +++ b/ttnn/cpp/ttnn/tensor/tensor.cpp @@ -20,6 +20,7 @@ #include "tt_metal/graph/graph_tracking.hpp" #include "ttnn/core.hpp" #include "ttnn/tensor/tensor_ops.hpp" + using namespace tt::constants; @@ -450,31 +451,7 @@ bool Tensor::is_allocated() const { ZoneScoped; auto output = std::visit( [](auto&& storage) -> bool { - using T = std::decay_t; - if constexpr (std::is_same_v) { - return std::visit([](auto&& buffer) -> bool { return buffer.is_allocated(); }, storage.buffer); - } else if constexpr (std::is_same_v) { - return bool(storage.buffer) and storage.buffer->size() > 0; - } else if constexpr (std::is_same_v) { - return true; - } else if constexpr (std::is_same_v) { - bool is_allocated = true; - for (int i = 0; i < storage.num_buffers(); i++) { - is_allocated &= - std::visit([](auto&& buffer) -> bool { return buffer.is_allocated(); }, storage.get_buffer(i)); - } - return is_allocated; - } else if constexpr (std::is_same_v) { - bool is_allocated = true; - for (int i = 0; i < storage.ordered_device_ids.size(); ++i) { - auto device_id = storage.ordered_device_ids[i]; - const auto& buffer = storage.get_buffer_for_device_id(device_id); - is_allocated &= bool(buffer) and buffer->size() > 0; - } - return is_allocated; - } else { - raise_unsupported_storage(); - } + return storage.is_allocated(); }, this->get_storage()); return output; diff --git a/ttnn/cpp/ttnn/tensor/types.hpp b/ttnn/cpp/ttnn/tensor/types.hpp 
index d6c94ac8c9e..f30339a80c8 100644 --- a/ttnn/cpp/ttnn/tensor/types.hpp +++ b/ttnn/cpp/ttnn/tensor/types.hpp @@ -306,7 +306,7 @@ struct OwnedStorage { static constexpr auto attribute_names = std::forward_as_tuple(); const auto attribute_values() const { return std::forward_as_tuple(); } - inline void insert_buffer(OwnedBuffer buffer_) { + inline void insert_buffer(const OwnedBuffer& buffer_) { this->buffer = buffer_; } @@ -314,6 +314,10 @@ struct OwnedStorage { return this->buffer; } + inline bool is_allocated() const { + return std::visit([](auto&& buffer) -> bool { return buffer.is_allocated(); }, buffer); + } + }; using DeviceBuffer = std::shared_ptr; @@ -344,6 +348,10 @@ struct DeviceStorage { inline DeviceBuffer get_buffer() const { return this->buffer; } static constexpr auto attribute_names = std::forward_as_tuple("memory_config"); const auto attribute_values() const { return std::make_tuple(this->memory_config()); } + + inline bool is_allocated() const { + return buffer && buffer->size() > 0; + } }; using BorrowedBuffer = std::variant< @@ -404,6 +412,11 @@ struct BorrowedStorage { static constexpr auto attribute_names = std::forward_as_tuple(); const auto attribute_values() const { return std::forward_as_tuple(); } + + inline bool is_allocated() const { + return true; + } + }; struct MultiDeviceHostStorage { @@ -452,7 +465,7 @@ struct MultiDeviceHostStorage { // Helper Functions - Getters and setters to get/modify storage attributes. These are needed to // preinitialize empty tensor handles and use/populate them in the worker threads. 
- void insert_buffer_and_shape_for_device(int buffer_index, const OwnedBuffer buffer, const Shape shape) { + void insert_buffer_and_shape_for_device(int buffer_index, const OwnedBuffer& buffer, const Shape shape) { std::lock_guard lock(mtx); buffers[buffer_index] = buffer; shapes[buffer_index] = shape; @@ -461,7 +474,7 @@ struct MultiDeviceHostStorage { OwnedBuffer get_buffer(int buffer_index) const { std::lock_guard lock(mtx); TT_ASSERT(buffer_index < buffers.size(), "Buffer not found for buffer_index " + std::to_string(buffer_index)); - return buffers[buffer_index];; + return buffers[buffer_index]; } OwnedBuffer& get_buffer(int buffer_index) { @@ -480,6 +493,16 @@ struct MultiDeviceHostStorage { std::lock_guard lock(mtx); return buffers.size(); } + + inline bool is_allocated() const { + // Unclear whether locking the mutex once per buffer or a single time here is better; + // a single lock around the whole scan seems preferable. + std::lock_guard lock(mtx); + + return std::all_of(buffers.begin(), buffers.end(), [](auto&& buffer) { + return std::visit([](auto&& buffer) -> bool { return buffer.is_allocated(); }, buffer); + }); + } }; struct MultiDeviceStorage { @@ -609,6 +632,15 @@ struct MultiDeviceHostStorage { std::lock_guard lock(buffer_mtx); return buffers.find(device_id) != buffers.end(); } + + inline bool is_allocated() const { + std::lock_guard lock(buffer_mtx); + + return std::all_of(ordered_device_ids.begin(), ordered_device_ids.end(), [&buffers = this->buffers](auto&& device_id) { + const auto& buffer = buffers.at(device_id); + return buffer && buffer->size() > 0; + }); + } }; using Storage = std::variant;