Skip to content

Commit

Permalink
Feedback from review
Browse files Browse the repository at this point in the history
  • Loading branch information
omilyutin-tt committed Dec 12, 2024
1 parent c135e39 commit eb71e44
Show file tree
Hide file tree
Showing 8 changed files with 269 additions and 120 deletions.
2 changes: 2 additions & 0 deletions tests/ttnn/unit_tests/gtests/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -26,8 +26,10 @@ set(TTNN_TENSOR_UNIT_TESTS_SRC
${CMAKE_CURRENT_SOURCE_DIR}/tensor/test_create_tensor_multi_device.cpp
${CMAKE_CURRENT_SOURCE_DIR}/tensor/test_create_tensor_with_layout.cpp
${CMAKE_CURRENT_SOURCE_DIR}/tensor/test_distributed_tensor.cpp
${CMAKE_CURRENT_SOURCE_DIR}/tensor/test_partition.cpp
${CMAKE_CURRENT_SOURCE_DIR}/tensor/test_shape_base.cpp
${CMAKE_CURRENT_SOURCE_DIR}/tensor/test_sharding_with_alignment.cpp
${CMAKE_CURRENT_SOURCE_DIR}/tensor/test_vector_conversion.cpp
)

add_executable(unit_tests_ttnn ${TTNN_UNIT_TESTS_SRC})
Expand Down
47 changes: 26 additions & 21 deletions tests/ttnn/unit_tests/gtests/tensor/test_distributed_tensor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -4,12 +4,10 @@

#include <gtest/gtest.h>

#include "common/bfloat16.hpp"
#include "ttnn/distributed/api.hpp"
#include "ttnn/operations/functions.hpp"
#include "ttnn/tensor/xtensor/conversion_utils.hpp"
#include "ttnn_test_fixtures.hpp"
#include <exception>
#include <ttnn/distributed/types.hpp>
#include <ttnn/distributed/distributed_tensor.hpp>

Expand All @@ -20,21 +18,23 @@ using ::ttnn::experimental::xtensor::from_vector;
using TensorDistributionTest = T3kMultiDeviceFixture;

TEST_F(TensorDistributionTest, Replication) {
Tensor input_tensor = from_vector(std::vector<float>{42.F, 13.F, -99.F}, ttnn::Shape{1, 1, 1, 3});
Tensor input_tensor =
from_vector(std::vector<float>{42.F, 13.F, -99.F}, ttnn::SimpleShape{1, 1, 1, 3}, DataType::FLOAT32);

auto mapper = api::replicate_tensor_to_mesh_mapper(*mesh_device_);
Tensor replicated_tensor = api::distribute_tensor(input_tensor, *mesh_device_, *mapper);

std::vector<Tensor> device_tensors = api::get_device_tensors(replicated_tensor);
EXPECT_EQ(device_tensors.size(), mesh_device_->num_devices());
for (const auto& device_tensor : device_tensors) {
EXPECT_TRUE(ttnn::allclose<bfloat16>(device_tensor.cpu(), input_tensor));
EXPECT_TRUE(ttnn::allclose<float>(device_tensor.cpu(), input_tensor));
}
}

TEST_F(TensorDistributionTest, Shard1DInvalidDim) {
const int num_devices = mesh_device_->num_devices();
Tensor input_tensor = from_vector(std::vector<float>(num_devices, 0), ttnn::Shape{1, 1, 1, num_devices});
Tensor input_tensor =
from_vector(std::vector<float>(num_devices, 0), ttnn::SimpleShape{1, 1, 1, num_devices}, DataType::FLOAT32);

EXPECT_ANY_THROW({
auto mapper = api::shard_tensor_to_mesh_mapper(*mesh_device_, -1);
Expand All @@ -50,7 +50,8 @@ TEST_F(TensorDistributionTest, Shard1DInvalidDim) {
TEST_F(TensorDistributionTest, Shard1DTooFewShards) {
const int num_devices = mesh_device_->num_devices();
ASSERT_LT(3, num_devices);
Tensor input_tensor = from_vector(std::vector<float>{42.F, 13.F, -99.F}, ttnn::Shape{1, 1, 1, 3});
Tensor input_tensor =
from_vector(std::vector<float>{42.F, 13.F, -99.F}, ttnn::SimpleShape{1, 1, 1, 3}, DataType::FLOAT32);

EXPECT_ANY_THROW({
auto mapper = api::shard_tensor_to_mesh_mapper(*mesh_device_, 3);
Expand All @@ -64,23 +65,24 @@ TEST_F(TensorDistributionTest, Shard1D) {
for (int i = 0; i < num_devices; i++) {
test_data.insert(test_data.end(), {i * 1.F, i * 2.F, i * 3.F});
}
Tensor input_tensor = from_vector(test_data, ttnn::Shape{1, num_devices, 3, 1});
Tensor input_tensor = from_vector(test_data, ttnn::SimpleShape{1, num_devices, 3, 1}, DataType::FLOAT32);

auto mapper = api::shard_tensor_to_mesh_mapper(*mesh_device_, 1);
Tensor sharded_tensor = api::distribute_tensor(input_tensor, *mesh_device_, *mapper);

std::vector<Tensor> device_tensors = api::get_device_tensors(sharded_tensor);
EXPECT_EQ(device_tensors.size(), mesh_device_->num_devices());
for (int i = 0; i < device_tensors.size(); i++) {
auto expected = from_vector(std::vector<float>{i * 1.F, i * 2.F, i * 3.F}, ttnn::Shape{1, 1, 3, 1});
EXPECT_TRUE(ttnn::allclose<bfloat16>(device_tensors[i].cpu(), expected));
auto expected = from_vector(
std::vector<float>{i * 1.F, i * 2.F, i * 3.F}, ttnn::SimpleShape{1, 1, 3, 1}, DataType::FLOAT32);
EXPECT_TRUE(ttnn::allclose<float>(device_tensors[i].cpu(), expected));
}

auto composer = api::concat_mesh_to_tensor_composer(/*dim=*/0);
Tensor concatenated_tensor = api::aggregate_tensor(sharded_tensor, *composer);

Tensor expected_tensor = from_vector(test_data, ttnn::Shape{num_devices, 1, 3, 1});
EXPECT_TRUE(ttnn::allclose<bfloat16>(concatenated_tensor, expected_tensor));
Tensor expected_tensor = from_vector(test_data, ttnn::SimpleShape{num_devices, 1, 3, 1}, DataType::FLOAT32);
EXPECT_TRUE(ttnn::allclose<float>(concatenated_tensor, expected_tensor));
}

TEST_F(TensorDistributionTest, Shard2DInvalidMeshShape) {
Expand Down Expand Up @@ -110,7 +112,7 @@ TEST_F(TensorDistributionTest, Shard2DReplicateDim) {
const int num_devices = num_rows * num_cols;

std::vector<float> test_data = {0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0};
Tensor input_tensor = from_vector(test_data, ttnn::Shape{1, num_rows, num_cols, 1});
Tensor input_tensor = from_vector(test_data, ttnn::SimpleShape{1, num_rows, num_cols, 1}, DataType::FLOAT32);
input_tensor.print();

auto mapper = api::shard_tensor_2d_to_mesh_mapper(
Expand All @@ -127,12 +129,14 @@ TEST_F(TensorDistributionTest, Shard2DReplicateDim) {

int i = 0;
for (; i < 4; i++) {
auto expected = from_vector(std::vector<float>{0.0, 1.0, 2.0, 3.0}, ttnn::Shape{1, 1, 4, 1});
EXPECT_TRUE(ttnn::allclose<bfloat16>(device_tensors[i].cpu(), expected));
auto expected =
from_vector(std::vector<float>{0.0, 1.0, 2.0, 3.0}, ttnn::SimpleShape{1, 1, 4, 1}, DataType::FLOAT32);
EXPECT_TRUE(ttnn::allclose<float>(device_tensors[i].cpu(), expected));
}
for (; i < device_tensors.size(); i++) {
auto expected = from_vector(std::vector<float>{4.0, 5.0, 6.0, 7.0}, ttnn::Shape{1, 1, 4, 1});
EXPECT_TRUE(ttnn::allclose<bfloat16>(device_tensors[i].cpu(), expected));
auto expected =
from_vector(std::vector<float>{4.0, 5.0, 6.0, 7.0}, ttnn::SimpleShape{1, 1, 4, 1}, DataType::FLOAT32);
EXPECT_TRUE(ttnn::allclose<float>(device_tensors[i].cpu(), expected));
}
}

Expand All @@ -146,7 +150,7 @@ TEST_F(TensorDistributionTest, Shard2D) {
for (int i = 0; i < num_devices; i++) {
test_data.insert(test_data.end(), {i * 1.F, i * 2.F, i * 3.F});
}
Tensor input_tensor = from_vector(test_data, ttnn::Shape{1, num_rows, num_cols, 3});
Tensor input_tensor = from_vector(test_data, ttnn::SimpleShape{1, num_rows, num_cols, 3}, DataType::FLOAT32);

auto mapper = api::shard_tensor_2d_to_mesh_mapper(
*mesh_device_,
Expand All @@ -160,8 +164,9 @@ TEST_F(TensorDistributionTest, Shard2D) {
std::vector<Tensor> device_tensors = api::get_device_tensors(sharded_tensor);
EXPECT_EQ(device_tensors.size(), mesh_device_->num_devices());
for (int i = 0; i < device_tensors.size(); i++) {
auto expected = from_vector(std::vector<float>{i * 1.F, i * 2.F, i * 3.F}, ttnn::Shape{1, 1, 1, 3});
EXPECT_TRUE(ttnn::allclose<bfloat16>(device_tensors[i].cpu(), expected));
auto expected = from_vector(
std::vector<float>{i * 1.F, i * 2.F, i * 3.F}, ttnn::SimpleShape{1, 1, 1, 3}, DataType::FLOAT32);
EXPECT_TRUE(ttnn::allclose<float>(device_tensors[i].cpu(), expected));
}

auto composer = api::concat_mesh_2d_to_tensor_composer(
Expand All @@ -172,8 +177,8 @@ TEST_F(TensorDistributionTest, Shard2D) {
});
Tensor concatenated_tensor = api::aggregate_tensor(sharded_tensor, *composer);

Tensor expected_tensor = from_vector(test_data, ttnn::Shape{num_rows, 1, num_cols, 3});
EXPECT_TRUE(ttnn::allclose<bfloat16>(concatenated_tensor, expected_tensor));
Tensor expected_tensor = from_vector(test_data, ttnn::SimpleShape{num_rows, 1, num_cols, 3}, DataType::FLOAT32);
EXPECT_TRUE(ttnn::allclose<float>(concatenated_tensor, expected_tensor));
}

} // namespace ttnn::distributed::test
21 changes: 21 additions & 0 deletions tests/ttnn/unit_tests/gtests/tensor/test_partition.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
// SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC
//
// SPDX-License-Identifier: Apache-2.0

#include <gtest/gtest.h>

#include "ttnn/tensor/tensor.hpp"
#include "ttnn/tensor/xtensor/conversion_utils.hpp"
#include "ttnn/tensor/xtensor/partition.hpp"
#include "ttnn/tensor/xtensor/xtensor_all_includes.hpp"

namespace ttnn {
namespace {

using ::tt::tt_metal::Tensor;
using ::ttnn::experimental::xtensor::chunk;
using ::ttnn::experimental::xtensor::concatenate;
using ::ttnn::experimental::xtensor::from_vector;

} // namespace
} // namespace ttnn
99 changes: 99 additions & 0 deletions tests/ttnn/unit_tests/gtests/tensor/test_vector_conversion.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,99 @@
// SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC
//
// SPDX-License-Identifier: Apache-2.0

#include <gtest/gtest.h>
#include <gmock/gmock.h>
#include <algorithm>
#include <cstdint>

#include "ttnn/tensor/tensor.hpp"
#include "ttnn/tensor/xtensor/conversion_utils.hpp"

namespace ttnn {
namespace {

using ::testing::Eq;
using ::testing::Pointwise;
using ::tt::tt_metal::Tensor;
using ::ttnn::experimental::xtensor::from_vector;
using ::ttnn::experimental::xtensor::to_vector;

// Shapes exercised by the round-trip tests below: scalar-like, tile-multiple,
// non-tile-aligned, rank-2, and rank-5 cases.
const std::vector<ttnn::SimpleShape>& GetShapesForTest() {
    // Heap-allocated and intentionally never freed, so the list is immune to
    // static destruction order issues at process exit.
    static const auto* kShapes = new std::vector<ttnn::SimpleShape>{
        ttnn::SimpleShape{1, 1, 1, 1},
        ttnn::SimpleShape{1, 1, 1, 10},
        ttnn::SimpleShape{1, 32, 32, 16},
        ttnn::SimpleShape{1, 40, 3, 128},
        ttnn::SimpleShape{2, 2},
        ttnn::SimpleShape{1, 1, 1, 1, 10},
    };
    return *kShapes;
}

// Returns the half-open range [start, end) sampled with stride `step`, as a
// vector of T. Generalized to support descending ranges (negative `step`);
// a zero `step` yields an empty vector instead of looping forever.
template <typename T>
std::vector<T> Arange(int64_t start, int64_t end, int64_t step) {
    std::vector<T> result;
    if (step == 0) {
        // Guard: the original loop condition never terminates with step == 0.
        return result;
    }
    for (int64_t i = start; (step > 0) ? (i < end) : (i > end); i += step) {
        if constexpr (std::is_same_v<T, ::bfloat16>) {
            // bfloat16 has no integer constructor; go through float.
            result.push_back(T(static_cast<float>(i)));
        } else {
            result.push_back(static_cast<T>(i));
        }
    }
    return result;
}

// Typed test fixture: the type parameter is the element type used for the
// vector <-> tensor round-trip conversions exercised below.
template <typename T>
class VectorConversionTest : public ::testing::Test {};

// Element types covered: 32-bit float, bfloat16, unsigned and signed 32-bit ints.
using TestTypes = ::testing::Types<float, bfloat16, uint32_t, int32_t>;
TYPED_TEST_SUITE(VectorConversionTest, TestTypes);

TYPED_TEST(VectorConversionTest, Basic) {
    // Round trip vector -> tensor -> vector for every test shape; all element
    // values must survive unchanged.
    for (const auto& shape : GetShapesForTest()) {
        const auto num_elements = static_cast<int64_t>(shape.volume());
        auto original = Arange<TypeParam>(0, num_elements, 1);
        auto round_tripped = to_vector<TypeParam>(from_vector(original, shape, convert_to_data_type<TypeParam>()));
        EXPECT_THAT(round_tripped, Pointwise(Eq(), original)) << "for shape: " << shape;
    }
}

TYPED_TEST(VectorConversionTest, InvalidSize) {
    // A buffer whose element count disagrees with the shape volume must be rejected.
    const ttnn::SimpleShape shape{32, 32};
    const auto too_small = Arange<TypeParam>(0, 42, 1);

    ASSERT_NE(too_small.size(), shape.volume());
    EXPECT_ANY_THROW(from_vector(too_small, shape, convert_to_data_type<TypeParam>()));
}

TYPED_TEST(VectorConversionTest, InvalidDtype) {
    // `from_vector` must reject a buffer whose element type does not match the
    // requested tensor dtype. The buffer size is made to match the shape volume
    // (unlike the original, which reused the 42-element mismatch from
    // `InvalidSize`), so a dtype mismatch is the only possible reason to throw.
    ttnn::SimpleShape shape{32, 32};
    auto input = Arange<TypeParam>(0, static_cast<int64_t>(shape.volume()), 1);

    ASSERT_EQ(input.size(), shape.volume());
    EXPECT_ANY_THROW(from_vector(
        input,
        shape,
        // Use INT32 as the mismatching dtype, except for when the actual type is int32_t.
        (std::is_same_v<TypeParam, int32_t> ? DataType::FLOAT32 : DataType::INT32)));
}

TEST(FloatVectorConversionTest, Bfloat16Representation) {
    // Verify float <-> bfloat16 round-trips through a BFLOAT16 tensor in both
    // directions: float in / bfloat16 out, and bfloat16 in / float out.
    for (const auto& shape : GetShapesForTest()) {
        const auto data_bf16 = Arange<bfloat16>(0, static_cast<int64_t>(shape.volume()), 1);

        std::vector<float> data_f32;
        data_f32.reserve(data_bf16.size());
        for (const bfloat16& value : data_bf16) {
            data_f32.push_back(value.to_float());
        }

        auto round_tripped_bf16 = to_vector<bfloat16>(from_vector(data_f32, shape, DataType::BFLOAT16));
        EXPECT_THAT(round_tripped_bf16, Pointwise(Eq(), data_bf16)) << "for shape: " << shape;

        auto round_tripped_f32 = to_vector<float>(from_vector(data_bf16, shape, DataType::BFLOAT16));
        EXPECT_THAT(round_tripped_f32, Pointwise(Eq(), data_f32)) << "for shape: " << shape;
    }
}

} // namespace
} // namespace ttnn
22 changes: 10 additions & 12 deletions ttnn/cpp/ttnn/distributed/distributed_tensor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -16,35 +16,33 @@ namespace {

class ReplicateTensorToMesh : public TensorToMesh {
public:
ReplicateTensorToMesh(MeshDevice& mesh_device) : mesh_device_(mesh_device) {}
ReplicateTensorToMesh(int num_devices) : num_devices_(num_devices) {}

std::vector<Tensor> map(const Tensor& tensor) override {
std::vector<Tensor> tensors;
tensors.reserve(mesh_device_.num_devices());
std::fill_n(std::back_inserter(tensors), mesh_device_.num_devices(), tensor);
tensors.reserve(num_devices_);
std::fill_n(std::back_inserter(tensors), num_devices_, tensor);
return tensors;
}

DistributedTensorConfig config() const override {
return DistributedTensorConfig{ReplicateTensor{mesh_device_.num_devices()}};
}
DistributedTensorConfig config() const override { return DistributedTensorConfig{ReplicateTensor{num_devices_}}; }

private:
MeshDevice& mesh_device_;
int num_devices_ = -1;
};

class ShardTensorToMesh : public TensorToMesh {
public:
ShardTensorToMesh(MeshDevice& mesh_device, int dim) : mesh_device_(mesh_device), shard_dim_(dim) {}
ShardTensorToMesh(int num_devices, int dim) : num_devices_(num_devices), shard_dim_(dim) {}

std::vector<Tensor> map(const Tensor& tensor) override {
return experimental::xtensor::chunk(tensor, mesh_device_.num_devices(), shard_dim_);
return experimental::xtensor::chunk(tensor, num_devices_, shard_dim_);
}

DistributedTensorConfig config() const override { return DistributedTensorConfig{ShardTensor{shard_dim_}}; }

private:
MeshDevice& mesh_device_;
int num_devices_ = -1;
int shard_dim_ = -1;
};

Expand Down Expand Up @@ -144,11 +142,11 @@ class ConcatMesh2dToTensor : public MeshToTensor {
} // namespace

std::unique_ptr<TensorToMesh> replicate_tensor_to_mesh_mapper(MeshDevice& mesh_device) {
return std::make_unique<ReplicateTensorToMesh>(mesh_device);
return std::make_unique<ReplicateTensorToMesh>(mesh_device.num_devices());
}

std::unique_ptr<TensorToMesh> shard_tensor_to_mesh_mapper(MeshDevice& mesh_device, int dim) {
return std::make_unique<ShardTensorToMesh>(mesh_device, dim);
return std::make_unique<ShardTensorToMesh>(mesh_device.num_devices(), dim);
}

std::unique_ptr<TensorToMesh> shard_tensor_2d_to_mesh_mapper(
Expand Down
Loading

0 comments on commit eb71e44

Please sign in to comment.