From 3231aa217ebc22b744e2acb545f6e88e41478059 Mon Sep 17 00:00:00 2001
From: Juan Camilo Vega
Date: Fri, 8 Nov 2024 16:34:15 +0000
Subject: [PATCH] #14517: Providing more verbose error statements

---
 ttnn/cpp/ttnn/distributed/api.cpp | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/ttnn/cpp/ttnn/distributed/api.cpp b/ttnn/cpp/ttnn/distributed/api.cpp
index 749b72579cb..6b691de8bca 100644
--- a/ttnn/cpp/ttnn/distributed/api.cpp
+++ b/ttnn/cpp/ttnn/distributed/api.cpp
@@ -66,7 +66,8 @@ Tensor aggregate_as_tensor(std::vector<Tensor>& tensor_shards)
             host_owned_buffers.push_back(std::get<OwnedStorage>(shard.get_storage()).buffer);
             shapes.push_back(shard.get_shape());
             if (shard.get_tile() != tile) {
-                TT_THROW("All tensor shards must have the same tile size");
+                TT_THROW("Error aggregating multichip tensors: attempting to aggregate tensors with different tiling configurations. Device {} has tiling ({}x{}) while device {} has tiling ({}x{}).",
+                    tensor_shards.at(0).device()->id(), tile.get_height(), tile.get_width(), shard.device()->id(), shard.get_tile().get_height(), shard.get_tile().get_width());
             }
         }
         auto storage = MultiDeviceHostStorage{AllGatherTensor(), std::move(host_owned_buffers), shapes};
@@ -82,7 +83,8 @@
             device_buffers.insert({device->id(), std::get<DeviceStorage>(shard.get_storage()).buffer});
             shapes.insert({device->id(), shard.get_shape()});
             if (shard.get_tile() != tile) {
-                TT_THROW("All tensor shards must have the same tile size");
+                TT_THROW("Error aggregating multichip tensors: attempting to aggregate tensors with different tiling configurations. Device {} has tiling ({}x{}) while device {} has tiling ({}x{}).",
+                    tensor_shards.at(0).device()->id(), tile.get_height(), tile.get_width(), shard.device()->id(), shard.get_tile().get_height(), shard.get_tile().get_width());
             }
         }
         auto storage = MultiDeviceStorage{AllGatherTensor(), ordered_device_ids, std::move(device_buffers), shapes};
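
Note: both hunks format an identical message. A minimal sketch of how the formatting could be shared is shown below, assuming a file-local helper in api.cpp's anonymous namespace is acceptable; the helper name throw_tile_mismatch, the int device-id parameters, and the spelling Tile for the type returned by get_tile() are assumptions, not part of the patch. TT_THROW and the accessors are the same symbols used in the hunks above.

namespace {
// Hypothetical helper (not in the patch): formats the tile-mismatch error once so
// both aggregation branches (host-owned and device storage) throw the same message.
void throw_tile_mismatch(int first_device_id, const Tile& expected, int shard_device_id, const Tile& actual) {
    TT_THROW(
        "Error aggregating multichip tensors: attempting to aggregate tensors with different tiling "
        "configurations. Device {} has tiling ({}x{}) while device {} has tiling ({}x{}).",
        first_device_id,
        expected.get_height(),
        expected.get_width(),
        shard_device_id,
        actual.get_height(),
        actual.get_width());
}
}  // namespace

Each TT_THROW site would then reduce to a single call such as
throw_tile_mismatch(tensor_shards.at(0).device()->id(), tile, shard.device()->id(), shard.get_tile());
keeping the two error messages from drifting apart in future edits.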