diff --git a/tests/ttnn/unit_tests/gtests/test_graph_basic.cpp b/tests/ttnn/unit_tests/gtests/test_graph_basic.cpp
index 6bd7b8de9f90..162c303e2094 100644
--- a/tests/ttnn/unit_tests/gtests/test_graph_basic.cpp
+++ b/tests/ttnn/unit_tests/gtests/test_graph_basic.cpp
@@ -40,6 +40,14 @@ TEST_P(BufferTestFixture, BufferTest) {
         const auto input_tensor_a = tt::tt_metal::create_device_tensor(input_a, device);
     }
     {
+        const auto input_a = ttnn::TensorSpec(
+            params.shape_a,
+            tt::tt_metal::TensorLayout(
+                tt::tt_metal::DataType::BFLOAT16,
+                tt::tt_metal::PageConfig(tt::tt_metal::Layout::TILE),
+                ttnn::L1_MEMORY_CONFIG));
+        const auto input_tensor_a = tt::tt_metal::create_device_tensor(input_a, device);
+
         const auto input_b = ttnn::TensorSpec(
             params.shape_b,
             tt::tt_metal::TensorLayout(
@@ -63,20 +71,25 @@ TEST_P(BufferTestFixture, BufferTest) {
 
         // Check if there are two buffer_allocate_nodes, and if each is connected to only one different buffer
         auto buffer_allocate_nodes = find_nodes_by_type(trace, kNodeBufferAllocate);
-        EXPECT_EQ(buffer_allocate_nodes.size(), 2);
+        EXPECT_EQ(buffer_allocate_nodes.size(), 3);
         for (const auto& node : buffer_allocate_nodes) {
             EXPECT_EQ(node.at(kConnections).size(), 1);
         }
         auto connection_a = buffer_allocate_nodes[0].at(kConnections)[0].get<int>();
-        auto connection_b = buffer_allocate_nodes[1].at(kConnections)[0].get<int>();
-        EXPECT_NE(connection_a, connection_b);
+        auto connection_a2 = buffer_allocate_nodes[1].at(kConnections)[0].get<int>();
+        auto connection_c = buffer_allocate_nodes[2].at(kConnections)[0].get<int>();
+        EXPECT_NE(connection_a, connection_a2);
+        EXPECT_NE(connection_a, connection_c);
+        EXPECT_NE(connection_a2, connection_c);
 
         // Check if there are two buffer nodes and they have correct sizes
         auto buffer_nodes = find_nodes_by_type(trace, kNodeBuffer);
-        EXPECT_EQ(buffer_nodes.size(), 2);
+        EXPECT_EQ(buffer_nodes.size(), 3);
         auto size_a = std::stoi(buffer_nodes[0].at(kParams).at(kSize).get<std::string>());
         EXPECT_EQ(params.shape_a.volume() * 2, size_a);
-        auto size_b = std::stoi(buffer_nodes[1].at(kParams).at(kSize).get<std::string>());
+        auto size_a2 = std::stoi(buffer_nodes[1].at(kParams).at(kSize).get<std::string>());
+        EXPECT_EQ(params.shape_a.volume() * 2, size_a2);
+        auto size_b = std::stoi(buffer_nodes[2].at(kParams).at(kSize).get<std::string>());
         EXPECT_EQ(params.shape_b.volume() * 2, size_b);
 
         // Print the trace for reference
diff --git a/tt_metal/impl/buffers/buffer.cpp b/tt_metal/impl/buffers/buffer.cpp
index f0b783d33cf3..81031f72a392 100644
--- a/tt_metal/impl/buffers/buffer.cpp
+++ b/tt_metal/impl/buffers/buffer.cpp
@@ -25,6 +25,8 @@ namespace tt {
 
 namespace tt_metal {
 
+size_t Buffer::next_unique_id = 0;
+
 std::ostream& operator<<(std::ostream& os, const ShardSpec& spec) {
     tt::stl::reflection::operator<<(os, spec);
     return os;
@@ -247,6 +249,7 @@ Buffer::Buffer(
     if (size != 0) {
         validate_buffer_size_and_page_size(size, page_size, buffer_type, buffer_layout, shard_parameters);
     }
+    unique_id_ = next_unique_id++;
 }
 
 std::shared_ptr<Buffer> Buffer::create(
diff --git a/tt_metal/impl/buffers/buffer.hpp b/tt_metal/impl/buffers/buffer.hpp
index a8dff2c072d5..e5eded3f655b 100644
--- a/tt_metal/impl/buffers/buffer.hpp
+++ b/tt_metal/impl/buffers/buffer.hpp
@@ -248,6 +248,8 @@ class Buffer final {
     std::optional<SubDeviceId> sub_device_id() const { return sub_device_id_; }
     std::optional<SubDeviceManagerId> sub_device_manager_id() const { return sub_device_manager_id_; }
 
+    size_t unique_id() const { return unique_id_; }
+
     Buffer(
         Device *device,
         DeviceAddr size,
@@ -300,6 +302,8 @@ class Buffer final {
     std::shared_ptr<const BufferPageMapping> buffer_page_mapping_;
 
     std::weak_ptr<Buffer> weak_self;
+    size_t unique_id_;
+    static size_t next_unique_id;
 };
 
 } // namespace v0
diff --git a/ttnn/cpp/ttnn/graph/graph_processor.cpp b/ttnn/cpp/ttnn/graph/graph_processor.cpp
index e925228a6208..67581ce62ff6 100644
--- a/ttnn/cpp/ttnn/graph/graph_processor.cpp
+++ b/ttnn/cpp/ttnn/graph/graph_processor.cpp
@@ -307,27 +307,8 @@ int GraphProcessor::add_tensor(const Tensor& t) {
     return tensor_counter;
 }
 
-namespace detail
-{
-    // hash buffer because address is not unique
-    inline size_t hash_object(const tt::tt_metal::Buffer* buffer) noexcept {
-        std::size_t seed = 0;
-        auto hash_combine = [&seed](auto&& value) {
-            std::hash<std::decay_t<decltype(value)>> hasher;
-            seed ^= hasher(value) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
-        };
-
-        hash_combine(reinterpret_cast<std::uintptr_t>(buffer));
-        hash_combine(buffer->size());
-        hash_combine(buffer->page_size());
-        hash_combine(buffer->buffer_type());
-
-        return seed;
-    }
-}
-
 int GraphProcessor::add_buffer(const tt::tt_metal::Buffer* buffer) {
-    auto buffer_alloc_id = detail::hash_object(buffer);
+    auto buffer_alloc_id = buffer->unique_id();
     auto counter = id_to_counter.count(buffer_alloc_id) > 0 ? id_to_counter[buffer_alloc_id] : graph.size();
     if (id_to_counter.count(buffer_alloc_id) == 0) {
         std::unordered_map<std::string, std::string> params = {
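
The core of the change is the last hunk: GraphProcessor::add_buffer previously keyed graph nodes on a hash of the buffer pointer, size, page size, and buffer type, which can still repeat once a freed buffer's address is reused by an identically configured buffer, whereas it now keys on a per-instance counter assigned in the Buffer constructor (unique_id_ = next_unique_id++). Below is a minimal standalone sketch of that id-assignment pattern, not the tt-metal code itself; TracedBuffer is a hypothetical stand-in for Buffer, and the std::atomic counter is an assumption added on top of the diff (which uses a plain static size_t) to keep the increment safe if buffers were ever constructed from multiple threads.

#include <atomic>
#include <cstddef>
#include <iostream>

// Hypothetical stand-in for tt::tt_metal::Buffer, reduced to the id logic only.
class TracedBuffer {
public:
    // Each instance takes the next value of a process-wide counter, mirroring
    // `unique_id_ = next_unique_id++;` in the Buffer constructor.
    TracedBuffer() : unique_id_(next_unique_id_++) {}

    std::size_t unique_id() const { return unique_id_; }

private:
    std::size_t unique_id_;
    // Assumption beyond the diff: std::atomic instead of a plain size_t, so the
    // increment stays correct under concurrent construction.
    static std::atomic<std::size_t> next_unique_id_;
};

std::atomic<std::size_t> TracedBuffer::next_unique_id_{0};

int main() {
    TracedBuffer a;
    TracedBuffer b;
    // Distinct objects always report distinct ids, even if one were destroyed
    // and another later constructed at the same address.
    std::cout << a.unique_id() << " " << b.unique_id() << "\n";  // prints "0 1"
    return 0;
}

The test update follows the same logic: with two buffers created from an identical TensorSpec for shape_a plus one for shape_b, the trace is now expected to contain three distinct buffer and buffer_allocate nodes, which the per-instance id guarantees regardless of how the buffers' addresses or contents compare.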