Removed the use of numpy creation functions
omilyutin-tt committed Dec 3, 2024
1 parent 01e5043 commit 23b8d7f
Showing 7 changed files with 92 additions and 142 deletions.
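
The diffs below replace the host-side ttnn::numpy creation helpers (zeros, ones, full) with the ttnn creation ops (ttnn::zeros, ttnn::ones, ttnn::full), which take the shape, data type, layout, and target device directly instead of relying on separate .to(Layout::TILE).to(device) calls. A minimal before/after sketch of the pattern, lifted from the bcast test hunk below (the shape, dtype, and device variable come from that test; this is a sketch, not a complete program):

    // Before: the numpy-style helper creates the tensor on host, which is then tilized and moved to device.
    Tensor b = ttnn::numpy::zeros({1, 1, TILE_HEIGHT, TILE_WIDTH}, DataType::BFLOAT16)
                   .to(Layout::TILE)
                   .to(device);

    // After: the creation op takes the layout and device up front.
    Tensor b = ttnn::zeros(
        ttnn::Shape({1, 1, TILE_HEIGHT, TILE_WIDTH}), DataType::BFLOAT16, Layout::TILE, *device);

Where the old code passed no layout, the new ttnn::full calls pass /*layout=*/std::nullopt, as seen in test_async_tensor_apis.cpp.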
18 changes: 7 additions & 11 deletions tests/tt_eager/ops/test_bcast_op.cpp
@@ -3,16 +3,13 @@
// SPDX-License-Identifier: Apache-2.0

#include "tt_metal/host_api.hpp"
#include "ttnn/cpp/ttnn/operations/creation.hpp"
#include "ttnn/tensor/tensor.hpp"
#include "ttnn/operations/data_movement/bcast/bcast.hpp"
#include "common/constants.hpp"
#include <magic_enum.hpp>
#include <ttnn/operations/numpy/functions.hpp>

#include <algorithm>
#include <functional>
#include <random>

using namespace tt;
using namespace tt_metal;
using namespace constants;
@@ -53,9 +50,8 @@ int main(int argc, char** argv) {
}

Tensor a = ttnn::numpy::random::random(input_shape_a).to(Layout::TILE).to(device);
- Tensor b = ttnn::numpy::zeros({1, 1, TILE_HEIGHT, TILE_WIDTH}, DataType::BFLOAT16)
-     .to(Layout::TILE)
-     .to(device);
+ Tensor b = ttnn::zeros(
+     ttnn::Shape({1, 1, TILE_HEIGHT, TILE_WIDTH}), DataType::BFLOAT16, Layout::TILE, *device);

for (auto bcast_math : magic_enum::enum_values<ttnn::BcastOpMath>()) {
Tensor c = ttnn::bcast(0, a, b, bcast_math, bcast_dim);
@@ -72,28 +68,28 @@ int main(int argc, char** argv) {

{
Tensor a = ttnn::numpy::random::random({1, 1, 32, 4544}).to(Layout::TILE).to(device);
- Tensor b = ttnn::numpy::zeros({1, 1, 32, 4544}, DataType::BFLOAT16).to(Layout::TILE).to(device);
+ Tensor b = ttnn::zeros(ttnn::Shape({1, 1, 32, 4544}), DataType::BFLOAT16, Layout::TILE, *device);
Tensor c = ttnn::bcast(0, a, b, ttnn::BcastOpMath::MUL, ttnn::BcastOpDim::H);
Tensor d = c.cpu();
}

{
Tensor a = ttnn::numpy::random::random({1, 1, 32, 4544}).to(Layout::TILE).to(device);
- Tensor b = ttnn::numpy::zeros({1, 1, 32, 4544}, DataType::BFLOAT16).to(Layout::TILE).to(device);
+ Tensor b = ttnn::zeros(ttnn::Shape({1, 1, 32, 4544}), DataType::BFLOAT16, Layout::TILE, *device);
Tensor c = ttnn::bcast(0, a, b, ttnn::BcastOpMath::ADD, ttnn::BcastOpDim::H);
Tensor d = c.cpu();
}

{
Tensor a = ttnn::numpy::random::random({1, 71, 32, 32}).to(Layout::TILE).to(device);
- Tensor b = ttnn::numpy::zeros({1, 1, 32, 32}, DataType::BFLOAT16).to(Layout::TILE).to(device);
+ Tensor b = ttnn::zeros(ttnn::Shape({1, 1, 32, 32}), DataType::BFLOAT16, Layout::TILE, *device);
Tensor c = ttnn::bcast(0, a, b, ttnn::BcastOpMath::MUL, ttnn::BcastOpDim::HW);
Tensor d = c.cpu();
}

{
Tensor a = ttnn::numpy::random::random({1, 71, 32, 64}).to(Layout::TILE).to(device);
- Tensor b = ttnn::numpy::zeros({1, 1, 32, 32}, DataType::BFLOAT16).to(Layout::TILE).to(device);
+ Tensor b = ttnn::zeros(ttnn::Shape({1, 1, 32, 32}), DataType::BFLOAT16, Layout::TILE, *device);
Tensor c = ttnn::bcast(0, a, b, ttnn::BcastOpMath::MUL, ttnn::BcastOpDim::HW);
Tensor d = c.cpu();
}
18 changes: 8 additions & 10 deletions tests/tt_eager/ops/test_bmm_op.cpp
@@ -3,15 +3,13 @@
// SPDX-License-Identifier: Apache-2.0

#include "tt_metal/host_api.hpp"
#include "ttnn/cpp/ttnn/operations/creation.hpp"
#include "ttnn/tensor/tensor.hpp"
#include "ttnn/tensor/types.hpp"
#include "ttnn/operations/matmul/device/matmul_op.hpp"
#include "common/constants.hpp"
#include "ttnn/operations/numpy/functions.hpp"

#include <algorithm>
#include <functional>
#include <random>

using namespace tt;
using namespace tt_metal;
using namespace constants;
@@ -37,14 +35,14 @@ int main(int argc, char** argv) {
uint32_t Kt = 2;
uint32_t Nt = 4;
uint32_t B = 5;
- tt::tt_metal::LegacyShape shapea = {B, 1, Mt * TILE_HEIGHT, Kt * TILE_WIDTH};
- tt::tt_metal::LegacyShape shapeb = {B, 1, Kt * TILE_HEIGHT, Nt * TILE_WIDTH};
- tt::tt_metal::LegacyShape shapeb1 = {1, 1, Kt * TILE_HEIGHT, Nt * TILE_WIDTH};
+ ttnn::Shape shapea({B, 1, Mt * TILE_HEIGHT, Kt * TILE_WIDTH});
+ ttnn::Shape shapeb({B, 1, Kt * TILE_HEIGHT, Nt * TILE_WIDTH});
+ ttnn::Shape shapeb1({1, 1, Kt * TILE_HEIGHT, Nt * TILE_WIDTH});

// Allocates a DRAM buffer on device populated with values specified by initialize
- Tensor a = ttnn::numpy::random::random(shapea).to(Layout::TILE).to(device);
- Tensor b = ttnn::numpy::zeros(shapeb, DataType::BFLOAT16).to(Layout::TILE).to(device);
- Tensor b1 = ttnn::numpy::zeros(shapeb1, DataType::BFLOAT16).to(Layout::TILE).to(device);
+ Tensor a = ttnn::numpy::random::random(shapea.value).to(Layout::TILE).to(device);
+ Tensor b = ttnn::zeros(shapeb, DataType::BFLOAT16, Layout::TILE, *device);
+ Tensor b1 = ttnn::zeros(shapeb1, DataType::BFLOAT16, Layout::TILE, *device);

Tensor mm = ttnn::operations::matmul::matmul(
a,
86 changes: 46 additions & 40 deletions tests/tt_eager/tensors/test_async_tensor_apis.cpp
@@ -2,13 +2,11 @@
//
// SPDX-License-Identifier: Apache-2.0

#include <algorithm>
#include <chrono>
#include <functional>
#include <random>

#include "common/bfloat16.hpp"
#include "common/constants.hpp"
#include "ttnn/cpp/ttnn/operations/creation.hpp"
#include "ttnn/tensor/host_buffer/functions.hpp"
#include "ttnn/tensor/host_buffer/types.hpp"
#include "ttnn/tensor/tensor.hpp"
@@ -21,19 +19,19 @@
#include "ttnn/operations/eltwise/binary/binary.hpp"
#include "ttnn/operations/eltwise/unary/unary.hpp"

- using namespace tt;
- using namespace tt_metal;
- using namespace constants;

+ namespace tt::tt_metal {
+ namespace {

+ using ::tt::constants::TILE_HEIGHT;
+ using ::tt::constants::TILE_WIDTH;

uint32_t get_device_buffer_address(const Tensor& tensor) {
TT_FATAL(std::holds_alternative<DeviceStorage>(tensor.get_storage()), "Tensor storage is not DeviceStorage");
auto buffer = std::get<DeviceStorage>(tensor.get_storage()).buffer;
uint32_t result = 0;
buffer->device()->push_work([&]() { result = buffer->address(); }, true);
return result;
}
+ } // namespace

TEST_F(DispatchFixture, TestTensorOwnershipSanity) {
// Sanity test tensor read, write and update paths with synchronous
@@ -122,18 +120,12 @@ TEST_F(DispatchFixture, TestAsyncEltwiseBinary) {

for (int i = 0; i < 5; i++) {
// Initialize tensors and move them to DRAM
- Tensor input_tensor_a =
-     ttnn::numpy::full<float>(
-         tt::tt_metal::LegacyShape({1, 1, 1024, 1024}), static_cast<float>(i), DataType::BFLOAT16, Layout::TILE)
-         .to(device);
- Tensor input_tensor_b =
-     ttnn::numpy::full<float>(
-         tt::tt_metal::LegacyShape({1, 1, 1024, 1024}), static_cast<float>(i), DataType::BFLOAT16, Layout::TILE)
-         .to(device);
- Tensor input_tensor_c =
-     ttnn::numpy::full<float>(
-         tt::tt_metal::LegacyShape({1, 1, 1024, 1024}), static_cast<float>(i), DataType::BFLOAT16, Layout::TILE)
-         .to(device);
+ Tensor input_tensor_a = ttnn::full(
+     ttnn::Shape({1, 1, 1024, 1024}), static_cast<float>(i), DataType::BFLOAT16, Layout::TILE, *device);
+ Tensor input_tensor_b = ttnn::full(
+     ttnn::Shape({1, 1, 1024, 1024}), static_cast<float>(i), DataType::BFLOAT16, Layout::TILE, *device);
+ Tensor input_tensor_c = ttnn::full(
+     ttnn::Shape({1, 1, 1024, 1024}), static_cast<float>(i), DataType::BFLOAT16, Layout::TILE, *device);
Tensor output_tensor_device = ttnn::multiply(ttnn::add(input_tensor_a, input_tensor_b), input_tensor_c);
Tensor output_tensor_device_2 = ttnn::neg(ttnn::subtract(output_tensor_device, input_tensor_c));

@@ -181,12 +173,18 @@ TEST_F(DispatchFixture, TestAsyncRefCountManager) {
for (int i = 0; i < 5; i++) {
// Run for multiple loops to ensure deterministic behaviour with device addresses
// Initialize 2 tensors on device
- Tensor tensor1 = ttnn::numpy::full<float>(
-     tt::tt_metal::LegacyShape({1, 1, 1024, 1024}), static_cast<float>(i), DataType::BFLOAT16)
-     .to(device);
- Tensor tensor2 = ttnn::numpy::full<float>(
-     tt::tt_metal::LegacyShape({1, 1, 1024, 1024}), static_cast<float>(i), DataType::BFLOAT16)
-     .to(device);
+ Tensor tensor1 = ttnn::full(
+     ttnn::Shape({1, 1, 1024, 1024}),
+     static_cast<float>(i),
+     DataType::BFLOAT16,
+     /*layout=*/std::nullopt,
+     *device);
+ Tensor tensor2 = ttnn::full(
+     ttnn::Shape({1, 1, 1024, 1024}),
+     static_cast<float>(i),
+     DataType::BFLOAT16,
+     /*layout=*/std::nullopt,
+     *device);
uint32_t tensor2_device_buf_addr = get_device_buffer_address(tensor2);
// Assign tensor1 to tensor2 and ensure that ref counts are appropriately updated with the buffer for tensor2
// deallocated
@@ -195,18 +193,23 @@
EXPECT_EQ(tensor1.tensor_attributes->main_thread_ref_count, 2);
// To check if tensor2 is deallocated, create a third tensor on device and ensure that its address matches the
// prev addr for tensor2
- Tensor tensor3 = ttnn::numpy::full<float>(
-     tt::tt_metal::LegacyShape({1, 1, 1024, 1024}), static_cast<float>(i), DataType::BFLOAT16)
-     .to(device);
+ Tensor tensor3 = ttnn::full(
+     ttnn::Shape({1, 1, 1024, 1024}),
+     static_cast<float>(i),
+     DataType::BFLOAT16,
+     /*layout=*/std::nullopt,
+     *device);
EXPECT_EQ(get_device_buffer_address(tensor3), tensor2_device_buf_addr);
EXPECT_EQ(get_device_buffer_address(tensor1), get_device_buffer_address(tensor2));
}
log_info(LogTest, "Testing Device tensor self-assignment through function");
for (int i = 0; i < 5; i++) {
- Tensor device_tensor =
-     ttnn::numpy::full<float>(
-         tt::tt_metal::LegacyShape({1, 1, 1024, 1024}), static_cast<float>(i), DataType::BFLOAT16)
-         .to(device);
+ Tensor device_tensor = ttnn::full(
+     ttnn::Shape({1, 1, 1024, 1024}),
+     static_cast<float>(i),
+     DataType::BFLOAT16,
+     /*layout=*/std::nullopt,
+     *device);
uint32_t device_tensor_address = get_device_buffer_address(device_tensor);
// This step will copy the tensor to a temp rval and std::move it back to the caller's instance of device_tensor
// Ensure ref count and address remain unchanged
@@ -217,18 +220,19 @@

log_info(LogTest, "Testing Device tensor move assignment");
for (int i = 0; i < 5; i++) {
- Tensor tensor1 = ttnn::numpy::full<float>(
-     tt::tt_metal::LegacyShape({1, 1, 1024, 1024}), static_cast<float>(i), DataType::BFLOAT16)
-     .to(device);
+ Tensor tensor1 = ttnn::full(
+     ttnn::Shape({1, 1, 1024, 1024}),
+     static_cast<float>(i),
+     DataType::BFLOAT16,
+     /*layout=*/std::nullopt,
+     *device);
Tensor tensor2 = std::move(tensor1);
EXPECT_EQ(tensor2.tensor_attributes->main_thread_ref_count, 1);
}

log_info(LogTest, "Testing Device tensor self-assignment");
- Tensor tensor_to_self_assign =
-     ttnn::numpy::full<float>(
-         tt::tt_metal::LegacyShape({1, 1, 1024, 1024}), static_cast<float>(0), DataType::BFLOAT16)
-         .to(device);
+ Tensor tensor_to_self_assign = ttnn::full(
+     ttnn::Shape({1, 1, 1024, 1024}), static_cast<float>(0), DataType::BFLOAT16, /*layout=*/std::nullopt, *device);
uint32_t tensor_to_self_assign_address = get_device_buffer_address(tensor_to_self_assign);
tensor_to_self_assign = tensor_to_self_assign;
EXPECT_EQ(tensor_to_self_assign.tensor_attributes->main_thread_ref_count, 1);
@@ -338,3 +342,5 @@ TEST_F(DispatchFixture, TestTensorAsyncDataMovement) {
EXPECT_EQ(readback_tensor.get_layout(), Layout::ROW_MAJOR);
EXPECT_EQ(readback_tensor.get_shape(), ttnn::Shape(tt::tt_metal::LegacyShape({1, 1, 32, tensor_stop / 32})));
}
+ } // namespace
+ } // namespace tt::tt_metal
11 changes: 4 additions & 7 deletions tests/tt_eager/tensors/test_copy_and_move.cpp
@@ -2,12 +2,9 @@
//
// SPDX-License-Identifier: Apache-2.0

#include <algorithm>
#include <functional>
#include <random>

#include "common/bfloat16.hpp"
#include "common/constants.hpp"
#include "ttnn/cpp/ttnn/operations/creation.hpp"
#include "ttnn/tensor/host_buffer/functions.hpp"
#include "ttnn/tensor/host_buffer/types.hpp"
#include "ttnn/tensor/tensor.hpp"
@@ -58,7 +55,7 @@ bool test_tensor_copy_semantics(Device* device) {
pass &= dev_a_data == host_d_copy_data;

// dev tensor updated with host tensor copy assignment
- Tensor host_e = ttnn::numpy::ones(single_tile_shape).to(Layout::TILE);
+ Tensor host_e = ttnn::ones(single_tile_shape, DataType::BFLOAT16, Layout::TILE);
Tensor dev_e_copy = ttnn::numpy::random::random(single_tile_shape).to(Layout::TILE).to(device);
dev_e_copy = host_e;
pass &= (dev_e_copy.storage_type() == StorageType::OWNED);
@@ -67,8 +64,8 @@
pass &= host_e_data == dev_e_copy_data;

// dev tensor updated with dev tensor copy assignment
- Tensor dev_b = ttnn::numpy::ones(single_tile_shape).to(Layout::TILE).to(device);
- Tensor dev_b_copy = ttnn::numpy::zeros(single_tile_shape).to(Layout::TILE).to(device);
+ Tensor dev_b = ttnn::ones(single_tile_shape, DataType::BFLOAT16, Layout::TILE, *device);
+ Tensor dev_b_copy = ttnn::zeros(single_tile_shape, DataType::BFLOAT16, Layout::TILE, *device);
dev_b_copy = dev_b;
pass &= (dev_b_copy.storage_type() == StorageType::DEVICE);
auto dev_b_on_host = dev_b.cpu();
2 changes: 1 addition & 1 deletion tests/tt_metal/tt_metal/common/dispatch_fixture.hpp
@@ -46,7 +46,7 @@ class DispatchFixture : public ::testing::Test {
}
void ReadBuffer(
tt::tt_metal::Device* device,
- std::shared_ptr<tt::tt_metal::Buffer> out_buffer,
+ const std::shared_ptr<tt::tt_metal::Buffer>& out_buffer,
std::vector<uint32_t>& dst_vec) {
if (this->slow_dispatch_) {
tt::tt_metal::detail::ReadFromBuffer(out_buffer, dst_vec);