Rename tt::tt_metal::Shape to LegacyShape to not conflict with TTNN (#12742)

#0: rename tt::tt_metal::Shape to LegacyShape to not conflict with ttnn::Shape
ayerofieiev-tt authored Sep 17, 2024
1 parent cd9ee3b commit 3e27847
Showing 293 changed files with 834 additions and 834 deletions.
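
For context, here is a minimal, self-contained sketch of the name collision this commit resolves. The struct members, dims, and `using` directives are hypothetical stand-ins; only the two type names, `tt::tt_metal::LegacyShape` and `ttnn::Shape`, come from the commit:

```cpp
#include <array>
#include <cstdint>

// Hypothetical stand-ins: only the namespaces and type names are real.
namespace tt::tt_metal {
struct LegacyShape {  // formerly tt::tt_metal::Shape
    std::array<uint32_t, 4> dims;
};
}  // namespace tt::tt_metal

namespace ttnn {
struct Shape {
    std::array<uint32_t, 4> dims;
};
}  // namespace ttnn

int main() {
    using namespace tt::tt_metal;
    using namespace ttnn;
    // Pre-rename, an unqualified `Shape` was ambiguous here; post-rename it
    // can only mean ttnn::Shape.
    Shape logical{{1, 1, 1, 2048}};
    LegacyShape padded{{1, 1, 32, 2048}};
    (void)logical;
    (void)padded;
    return 0;
}
```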
8 changes: 4 additions & 4 deletions docs/source/ttnn/ttnn/dependencies/tt_lib.rst
@@ -34,7 +34,7 @@ New Device Operation
struct <NewOperation> {
void validate(const std::vector<Tensor> &input_tensors) const;
-std::vector<Shape> compute_output_shapes(const std::vector<Tensor> &input_tensors) const;
+std::vector<tt::tt_metal::LegacyShape> compute_output_shapes(const std::vector<Tensor> &input_tensors) const;
std::vector<Tensor> create_output_tensors(const std::vector<Tensor> &input_tensors) const;
operation::ProgramWithCallbacks create_program(const std::vector<Tensor>& input_tensors, std::vector<Tensor> &output_tensors) const;
};
@@ -48,7 +48,7 @@ New Device Operation with a member
int some_member
void validate(const std::vector<Tensor> &input_tensors) const;
-std::vector<Shape> compute_output_shapes(const std::vector<Tensor> &input_tensors) const;
+std::vector<tt::tt_metal::LegacyShape> compute_output_shapes(const std::vector<Tensor> &input_tensors) const;
std::vector<Tensor> create_output_tensors(const std::vector<Tensor> &input_tensors) const;
operation::ProgramWithCallbacks create_program(const std::vector<Tensor>& input_tensors, std::vector<Tensor> &output_tensors) const;
};
@@ -61,7 +61,7 @@ New Device Operation with Optional Input Tensors
struct <NewOperation> {
void validate(const std::vector<Tensor> &input_tensors,
const std::vector<std::optional<const Tensor>>& optional_input_tensors) const;
-std::vector<Shape> compute_output_shapes(const std::vector<Tensor> &input_tensors) const;
+std::vector<tt::tt_metal::LegacyShape> compute_output_shapes(const std::vector<Tensor> &input_tensors) const;
std::vector<Tensor> create_output_tensors(const std::vector<Tensor> &input_tensors) const;
operation::ProgramWithCallbacks create_program(
const std::vector<Tensor>& input_tensors,
@@ -80,7 +80,7 @@ and create_output_tensors with the additional parameter for the output_tensors.
struct <NewOperation> {
void validate_with_output_tensors(const std::vector<Tensor> &input_tensors, const std::vector<std::optional<Tensor>>& output_tensors) const;
-std::vector<Shape> compute_output_shapes(const std::vector<Tensor> &input_tensors) const;
+std::vector<tt::tt_metal::LegacyShape> compute_output_shapes(const std::vector<Tensor> &input_tensors) const;
std::vector<std::optional<Tensor>> create_output_tensors(const std::vector<Tensor> &input_tensors, const std::vector<std::optional<Tensor>>& output_tensors) const;
operation::ProgramWithOptionalOutputTensors create_program(const std::vector<Tensor>& input_tensors, std::vector<std::optional<Tensor>> &output_tensors) const;
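
To make the documented interface concrete, here is a declaration-only sketch of a device operation after this change. The operation name and the forward declarations are stand-ins; only the four method signatures follow the docs above:

```cpp
#include <vector>

// Stand-in forward declarations; the real types live in tt_metal/ttnn headers.
struct Tensor;
namespace tt::tt_metal { struct LegacyShape; }
namespace operation { struct ProgramWithCallbacks; }

// Hypothetical <NewOperation> implementing the documented interface.
struct MyNewOperation {
    void validate(const std::vector<Tensor>& input_tensors) const;
    std::vector<tt::tt_metal::LegacyShape> compute_output_shapes(
        const std::vector<Tensor>& input_tensors) const;
    std::vector<Tensor> create_output_tensors(
        const std::vector<Tensor>& input_tensors) const;
    operation::ProgramWithCallbacks create_program(
        const std::vector<Tensor>& input_tensors,
        std::vector<Tensor>& output_tensors) const;
};

int main() { return 0; }
```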
2 changes: 1 addition & 1 deletion tests/tt_eager/integration_tests/test_bert.cpp
@@ -253,7 +253,7 @@ void test_bert() {
"qa_head_bias",
ttnn::reshape(
tt::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, TILE_WIDTH}, Layout::TILE).to(device, dram_memory_config),
-ttnn::Shape{tt::tt_metal::Shape{{1, 1, 1, TILE_WIDTH}, {1, 1, TILE_HEIGHT, TILE_WIDTH}}}));
+ttnn::Shape{tt::tt_metal::LegacyShape{{1, 1, 1, TILE_WIDTH}, {1, 1, TILE_HEIGHT, TILE_WIDTH}}}));

auto run_bert = [&]() {
tt::log_debug(tt::LogTest, "run_bert started");
10 changes: 5 additions & 5 deletions tests/tt_eager/ops/test_average_pool.cpp
@@ -13,9 +13,9 @@ using tt::tt_metal::Device;
using tt::tt_metal::Tensor;
using tt::tt_metal::DataType;
using tt::tt_metal::Layout;
-using tt::tt_metal::Shape;
+using tt::tt_metal::LegacyShape;

-Tensor run_avg_pool_2d_resnet(Shape& tensor_shape, Device* device) {
+Tensor run_avg_pool_2d_resnet(tt::tt_metal::LegacyShape& tensor_shape, Device* device) {
using ttnn::operations::experimental::auto_format::AutoFormat;
auto input_tensor = tt::numpy::random::random(tensor_shape, DataType::BFLOAT16);
auto padded_input_shape = AutoFormat::pad_to_tile_shape(tensor_shape, false, false);
@@ -31,11 +31,11 @@ int main () {
int device_id = 0;
auto device = tt::tt_metal::CreateDevice(device_id);

-Shape resnet18_shape = {1, 1, 7 * 7, 2048};
+tt::tt_metal::LegacyShape resnet18_shape = {1, 1, 7 * 7, 2048};
auto result = run_avg_pool_2d_resnet(resnet18_shape, device);

-TT_FATAL(result.get_legacy_shape() == Shape({1, 1, tt::constants::TILE_HEIGHT, 2048}), "Incorrect shape {}.", result.get_legacy_shape());
-TT_FATAL(result.get_legacy_shape().without_padding() == Shape({1, 1, 1, 2048}), "Incorrect shape {}.", result.get_legacy_shape().without_padding());
+TT_FATAL(result.get_legacy_shape() == tt::tt_metal::LegacyShape({1, 1, tt::constants::TILE_HEIGHT, 2048}), "Incorrect shape {}.", result.get_legacy_shape());
+TT_FATAL(result.get_legacy_shape().without_padding() == tt::tt_metal::LegacyShape({1, 1, 1, 2048}), "Incorrect shape {}.", result.get_legacy_shape().without_padding());

TT_FATAL(tt::tt_metal::CloseDevice(device), "Error");
return 0;
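
The two TT_FATAL checks above hinge on the padded-versus-logical distinction the legacy shape carries: `get_legacy_shape()` reports tile-padded dims (height rounded up to TILE_HEIGHT), while `without_padding()` recovers the logical dims. A self-contained sketch of that relationship; the type is a stand-in, and only the method name and the two shapes mirror the test:

```cpp
#include <array>
#include <cassert>
#include <cstdint>

// Stand-in for tt::tt_metal::LegacyShape's padded/unpadded split; the real
// class tracks per-dimension padding, this sketch keeps two dim arrays.
struct LegacyShapeSketch {
    std::array<uint32_t, 4> padded;
    std::array<uint32_t, 4> logical;

    bool operator==(const LegacyShapeSketch& other) const {
        return padded == other.padded;  // compare padded dims, as the test does
    }
    LegacyShapeSketch without_padding() const { return {logical, logical}; }
};

int main() {
    constexpr uint32_t TILE_HEIGHT = 32;  // assumed value of tt::constants::TILE_HEIGHT
    // Average-pooling 7 * 7 positions down to 1 leaves a tile-padded output:
    // padded height TILE_HEIGHT, logical height 1, as asserted in the test.
    LegacyShapeSketch result{{1, 1, TILE_HEIGHT, 2048}, {1, 1, 1, 2048}};
    assert((result == LegacyShapeSketch{{1, 1, TILE_HEIGHT, 2048}, {}}));
    assert((result.without_padding() == LegacyShapeSketch{{1, 1, 1, 2048}, {}}));
    return 0;
}
```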
2 changes: 1 addition & 1 deletion tests/tt_eager/ops/test_bcast_op.cpp
@@ -38,7 +38,7 @@ int main(int argc, char **argv) {
////////////////////////////////////////////////////////////////////////////
// Application Setup
////////////////////////////////////////////////////////////////////////////
-auto shapes = std::vector<Shape>{{1, 1, TILE_HEIGHT, TILE_WIDTH}, {1, 1, TILE_HEIGHT * 2, TILE_WIDTH * 2}, {1, 1, TILE_HEIGHT * 3, TILE_WIDTH * 4}};
+auto shapes = std::vector<tt::tt_metal::LegacyShape>{{1, 1, TILE_HEIGHT, TILE_WIDTH}, {1, 1, TILE_HEIGHT * 2, TILE_WIDTH * 2}, {1, 1, TILE_HEIGHT * 3, TILE_WIDTH * 4}};

auto run_operations = [&shapes, device] {
for (const auto shape : shapes) {
6 changes: 3 additions & 3 deletions tests/tt_eager/ops/test_bmm_op.cpp
@@ -41,9 +41,9 @@ int main(int argc, char **argv) {
uint32_t Kt = 2;
uint32_t Nt = 4;
uint32_t B = 5;
-Shape shapea = {B, 1, Mt*TILE_HEIGHT, Kt*TILE_WIDTH};
-Shape shapeb = {B, 1, Kt*TILE_HEIGHT, Nt*TILE_WIDTH};
-Shape shapeb1 = {1, 1, Kt*TILE_HEIGHT, Nt*TILE_WIDTH};
+tt::tt_metal::LegacyShape shapea = {B, 1, Mt*TILE_HEIGHT, Kt*TILE_WIDTH};
+tt::tt_metal::LegacyShape shapeb = {B, 1, Kt*TILE_HEIGHT, Nt*TILE_WIDTH};
+tt::tt_metal::LegacyShape shapeb1 = {1, 1, Kt*TILE_HEIGHT, Nt*TILE_WIDTH};

// Allocates a DRAM buffer on device populated with values specified by initialize
Tensor a = tt::numpy::random::random(shapea).to(Layout::TILE).to(device);
20 changes: 10 additions & 10 deletions tests/tt_eager/ops/test_eltwise_binary_op.cpp
@@ -14,7 +14,7 @@ using tt::tt_metal::Device;
using tt::tt_metal::Layout;
using tt::tt_metal::Tensor;
using tt::tt_metal::OwnedStorage;
-using tt::tt_metal::Shape;
+using tt::tt_metal::LegacyShape;

template <typename BinaryFunction>
Tensor host_function(const Tensor& input_tensor_a, const Tensor& input_tensor_b) {
@@ -31,7 +31,7 @@ Tensor host_function(const Tensor& input_tensor_a, const Tensor& input_tensor_b)
}

template <auto HostFunction, typename DeviceFunction, typename... Args>
-bool run_test(const Shape& shape, const DeviceFunction& device_function, Device* device, Args... args) {
+bool run_test(const tt::tt_metal::LegacyShape& shape, const DeviceFunction& device_function, Device* device, Args... args) {
auto input_tensor_a = tt::numpy::random::random(shape, DataType::BFLOAT16);
auto input_tensor_b = tt::numpy::random::random(shape, DataType::BFLOAT16);

@@ -51,51 +51,51 @@ int main() {


{
-Shape shape = {1, 1, tt::constants::TILE_HEIGHT, tt::constants::TILE_WIDTH};
+tt::tt_metal::LegacyShape shape = {1, 1, tt::constants::TILE_HEIGHT, tt::constants::TILE_WIDTH};
auto allclose = run_test<host_function<std::plus<float>>>(shape, ttnn::add, device);
TT_FATAL(allclose, "Error");
}

{
-Shape shape = {1, 1, tt::constants::TILE_HEIGHT, tt::constants::TILE_WIDTH};
+tt::tt_metal::LegacyShape shape = {1, 1, tt::constants::TILE_HEIGHT, tt::constants::TILE_WIDTH};
auto allclose = run_test<host_function<std::minus<float>>>(shape, ttnn::subtract, device);
TT_FATAL(allclose, "Error");
}

{
-Shape shape = {1, 1, tt::constants::TILE_HEIGHT, tt::constants::TILE_WIDTH};
+tt::tt_metal::LegacyShape shape = {1, 1, tt::constants::TILE_HEIGHT, tt::constants::TILE_WIDTH};
auto allclose = run_test<host_function<std::multiplies<float>>>(shape, ttnn::multiply, device, 1e-2f, 1e-3f);
TT_FATAL(allclose, "Error");
}

auto run_binary_ops = [&] {
{
-Shape shape = {1, 1, tt::constants::TILE_HEIGHT, tt::constants::TILE_WIDTH};
+tt::tt_metal::LegacyShape shape = {1, 1, tt::constants::TILE_HEIGHT, tt::constants::TILE_WIDTH};
auto allclose = run_test<host_function<std::plus<float>>>(shape, ttnn::add, device);
TT_FATAL(allclose, "Error");
}

{
-Shape shape = {1, 1, tt::constants::TILE_HEIGHT, tt::constants::TILE_WIDTH};
+tt::tt_metal::LegacyShape shape = {1, 1, tt::constants::TILE_HEIGHT, tt::constants::TILE_WIDTH};
auto allclose = run_test<host_function<std::minus<float>>>(shape, ttnn::subtract, device);
TT_FATAL(allclose, "Error");
}

{
-Shape shape = {1, 1, tt::constants::TILE_HEIGHT * 2, tt::constants::TILE_WIDTH * 2};
+tt::tt_metal::LegacyShape shape = {1, 1, tt::constants::TILE_HEIGHT * 2, tt::constants::TILE_WIDTH * 2};
auto allclose = run_test<host_function<std::plus<float>>>(shape, ttnn::add, device);
TT_FATAL(allclose, "Error");
}

{
-Shape shape = {1, 1, tt::constants::TILE_HEIGHT, tt::constants::TILE_WIDTH};
+tt::tt_metal::LegacyShape shape = {1, 1, tt::constants::TILE_HEIGHT, tt::constants::TILE_WIDTH};
auto allclose =
run_test<host_function<std::multiplies<float>>>(shape, ttnn::multiply, device, 1e-2f, 1e-3f);
TT_FATAL(allclose, "Error");
}

{
-Shape shape = {1, 1, tt::constants::TILE_HEIGHT * 4, tt::constants::TILE_WIDTH * 4};
+tt::tt_metal::LegacyShape shape = {1, 1, tt::constants::TILE_HEIGHT * 4, tt::constants::TILE_WIDTH * 4};
auto allclose = run_test<host_function<std::plus<float>>>(shape, ttnn::add, device);
TT_FATAL(allclose, "Error");
}
13 changes: 7 additions & 6 deletions tests/tt_eager/ops/test_eltwise_unary_op.cpp
@@ -20,7 +20,7 @@ using tt::tt_metal::Device;

using tt::tt_metal::Layout;
using tt::tt_metal::OwnedStorage;
-using tt::tt_metal::Shape;
+using tt::tt_metal::LegacyShape;
using tt::tt_metal::Tensor;

namespace detail {
@@ -57,7 +57,7 @@ Tensor host_function(const Tensor& input_tensor) {
}

template <ttnn::operations::unary::UnaryOpType unary_op_type, typename... Args>
-bool run_test(Device* device, const Shape& shape, float low, float high, Args... args) {
+bool run_test(Device* device, const tt::tt_metal::LegacyShape& shape, float low, float high, Args... args) {
auto input_tensor = tt::numpy::random::uniform(bfloat16(low), bfloat16(high), shape).to(Layout::TILE);

using ttnn::operations::unary::UnaryWithParam;
@@ -110,7 +110,7 @@ void test_operation_infrastructure() {
int device_id = 0;
auto device = tt::tt_metal::CreateDevice(device_id);

-auto shape = Shape{1, 1, TILE_HEIGHT, TILE_WIDTH};
+auto shape = tt::tt_metal::LegacyShape{1, 1, TILE_HEIGHT, TILE_WIDTH};
auto input_tensor = tt::numpy::random::uniform(bfloat16(0), bfloat16(1), shape).to(Layout::TILE).to(device);

ttnn::operations::unary::operation_attributes_t op_args {
@@ -149,8 +149,9 @@ void test_shape_padding() {
output_tensor = output_tensor.cpu();

auto output_shape = output_tensor.get_legacy_shape();
-TT_FATAL(output_shape == tt::tt_metal::Shape(padded_input_shape), "Error");
-TT_FATAL(output_shape.without_padding() == tt::tt_metal::Shape(input_shape), "Error");
+
+TT_FATAL(output_shape == tt::tt_metal::LegacyShape(padded_input_shape), "Error");
+TT_FATAL(output_shape.without_padding() == tt::tt_metal::LegacyShape(input_shape), "Error");

TT_FATAL(tt::tt_metal::CloseDevice(device), "Error");
}
@@ -177,7 +178,7 @@ void test_numerically() {
int device_id = 0;
auto device = tt::tt_metal::CreateDevice(device_id);

-auto shape = Shape{1, 1, TILE_HEIGHT, TILE_WIDTH};
+auto shape = tt::tt_metal::LegacyShape{1, 1, TILE_HEIGHT, TILE_WIDTH};
{
auto allclose = run_test<UnaryOpType::SQRT>(device, shape, 0.0f, 1.0f, 1e-1f, 1e-5f);
TT_FATAL(allclose, "Error");
2 changes: 1 addition & 1 deletion tests/tt_eager/ops/test_fold_op.cpp
@@ -15,7 +15,7 @@ using namespace tt;
using namespace tt::tt_metal;
using namespace constants;

-void run_fold(Device *device, Shape shape) {
+void run_fold(Device *device, tt::tt_metal::LegacyShape shape) {
Tensor input_tensor = tt::numpy::random::random(shape).to(Layout::ROW_MAJOR).to(device);
uint32_t stride_h = 2;
uint32_t stride_w = 2;
2 changes: 1 addition & 1 deletion tests/tt_eager/ops/test_layernorm_op.cpp
@@ -29,7 +29,7 @@ int main(int argc, char **argv) {
////////////////////////////////////////////////////////////////////////////
int device_id = 0;
tt_metal::Device *device = tt_metal::CreateDevice(device_id);
-Shape shape = {1, 1, TILE_HEIGHT, TILE_WIDTH};
+tt::tt_metal::LegacyShape shape = {1, 1, TILE_HEIGHT, TILE_WIDTH};
Tensor a = tt::numpy::random::random(shape).to(Layout::TILE).to(device);
Tensor c = ttnn::layer_norm(a, 1e-4f);
Tensor d = c.cpu();
5 changes: 3 additions & 2 deletions tests/tt_eager/ops/test_pad_op.cpp
@@ -32,8 +32,9 @@ void test_operation_infrastructure() {
auto output_tensor = ttnn::pad(input_tensor, padded_shape, tt::tt_metal::Array4D({0, 0, 0, 0}), 0);

auto output_shape = output_tensor.get_legacy_shape();
-TT_FATAL(output_shape == tt::tt_metal::Shape(padded_shape), "Error");
-TT_FATAL(output_shape.without_padding() == tt::tt_metal::Shape(input_shape), "Error");
+
+TT_FATAL(output_shape == tt::tt_metal::LegacyShape(padded_shape), "Error");
+TT_FATAL(output_shape.without_padding() == tt::tt_metal::LegacyShape(input_shape), "Error");
}

int main(int argc, char** argv) {
14 changes: 7 additions & 7 deletions tests/tt_eager/ops/test_sliding_window_ops.cpp
@@ -14,7 +14,7 @@
#include "ttnn/deprecated/tt_numpy/functions.hpp"
#include "ttnn/tensor/types.hpp"

-using tt::tt_metal::Shape;
+using tt::tt_metal::LegacyShape;
using tt::tt_metal::Tensor;
using namespace ttnn::operations::sliding_window;

@@ -370,12 +370,12 @@ int main() {
.pad_hw = {tc.pad_h, tc.pad_w},
.dilation_hw = {1, 1},
.num_cores_nhw = tc.num_cores_nhw};
-Shape input_tensor_shape = {
+tt::tt_metal::LegacyShape input_tensor_shape = {
config.batch_size,
config.input_hw.first + 2 * config.pad_hw.first,
config.input_hw.second + 2 * config.pad_hw.second};
-Shape output_tensor_shape = config.get_output_shape().value;
-Shape filter_tensor_shape = {config.window_hw.first, config.window_hw.second};
+tt::tt_metal::LegacyShape output_tensor_shape = config.get_output_shape().value;
+tt::tt_metal::LegacyShape filter_tensor_shape = {config.window_hw.first, config.window_hw.second};

Tensor input_padded_tensor =
tt::numpy::random::random(input_tensor_shape, DataType::BFLOAT16).to(Layout::ROW_MAJOR).cpu();
@@ -387,12 +387,12 @@
vector<float> filter_vector = create_filter_vec(filter_tensor_buf, tc.filter_h, tc.filter_w);
owned_buffer::Buffer<bfloat16> out_golden_tensor_buf = ref_conv_op(
input_padded_tensor,
-ttnn::types::Shape(input_tensor_shape),
+ttnn::Shape(input_tensor_shape),
tc.stride_h,
tc.stride_w,
filter_vector,
-ttnn::types::Shape(filter_tensor_shape),
-ttnn::types::Shape(output_tensor_shape));
+ttnn::Shape(filter_tensor_shape),
+ttnn::Shape(output_tensor_shape));

auto failed_tests = validate_generate_functions(
device,
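
Besides the rename, this hunk also shortens `ttnn::types::Shape` to `ttnn::Shape` where a legacy shape gets wrapped at a call boundary. A hedged sketch of that wrapping pattern; both types and the dims are stand-ins, and only the conversion direction mirrors the test:

```cpp
#include <array>
#include <cstdint>

namespace tt::tt_metal {
struct LegacyShape {  // stand-in
    std::array<uint32_t, 4> dims;
};
}  // namespace tt::tt_metal

namespace ttnn {
struct Shape {  // stand-in, constructible from the legacy type as in the test
    std::array<uint32_t, 4> dims;
    explicit Shape(const tt::tt_metal::LegacyShape& legacy) : dims(legacy.dims) {}
};
}  // namespace ttnn

int main() {
    // Build shapes as tt::tt_metal::LegacyShape, then wrap them as ttnn::Shape
    // at the reference-op boundary (hypothetical dims).
    tt::tt_metal::LegacyShape input_tensor_shape{{1, 10, 10, 1}};
    ttnn::Shape wrapped(input_tensor_shape);  // post-rename spelling
    (void)wrapped;
    return 0;
}
```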
2 changes: 1 addition & 1 deletion tests/tt_eager/ops/test_softmax_op.cpp
@@ -15,7 +15,7 @@ using namespace tt;
using namespace tt::tt_metal;
using namespace constants;

-void run_softmax(Device* device, Shape shape) {
+void run_softmax(Device* device, tt::tt_metal::LegacyShape shape) {
Tensor input_tensor = tt::numpy::random::random(shape).to(Layout::TILE).to(device);
Tensor device_output_tensor = ttnn::softmax_in_place(input_tensor);
Tensor output_tensor = device_output_tensor.cpu();
2 changes: 1 addition & 1 deletion tests/tt_eager/ops/test_tilize_op.cpp
@@ -39,7 +39,7 @@ int main(int argc, char **argv) {
////////////////////////////////////////////////////////////////////////////
// Application Setup
////////////////////////////////////////////////////////////////////////////
-Shape shape = {1, 64, 32, 64};
+tt::tt_metal::LegacyShape shape = {1, 64, 32, 64};
// Allocates a DRAM buffer on device populated with values specified by initialize
Tensor a = tt::numpy::random::random(shape).to(device);
Tensor b = ttnn::tilize(a);
2 changes: 1 addition & 1 deletion tests/tt_eager/ops/test_tilize_op_channels_last.cpp
@@ -39,7 +39,7 @@ int main(int argc, char **argv) {
////////////////////////////////////////////////////////////////////////////
// Application Setup
////////////////////////////////////////////////////////////////////////////
-Shape shape = {1, 32, 32, 64};
+tt::tt_metal::LegacyShape shape = {1, 32, 32, 64};
// Allocates a DRAM buffer on device populated with values specified by initialize
Tensor a = tt::numpy::random::random(shape).to(device);
Tensor b = ttnn::tilize(a);
2 changes: 1 addition & 1 deletion tests/tt_eager/ops/test_tilize_zero_padding.cpp
@@ -39,7 +39,7 @@ int main(int argc, char **argv) {
////////////////////////////////////////////////////////////////////////////
// Application Setup
////////////////////////////////////////////////////////////////////////////
-Shape shape = {1, 32, 45, 64};
+tt::tt_metal::LegacyShape shape = {1, 32, 45, 64};
// Allocates a DRAM buffer on device populated with values specified by initialize
Tensor a = tt::numpy::random::random(shape).to(device);
Tensor b = ttnn::tilize_with_zero_padding(a);
2 changes: 1 addition & 1 deletion tests/tt_eager/ops/test_tilize_zero_padding_channels_last.cpp
@@ -39,7 +39,7 @@ int main(int argc, char **argv) {
////////////////////////////////////////////////////////////////////////////
// Application Setup
////////////////////////////////////////////////////////////////////////////
-Shape shape = {1, 32, 61, 32};
+tt::tt_metal::LegacyShape shape = {1, 32, 61, 32};
// Allocates a DRAM buffer on device populated with values specified by initialize
Tensor a = tt::numpy::arange<bfloat16>(0, tt_metal::compute_volume(shape), 1).reshape(shape).to(device);
Tensor b = ttnn::tilize_with_zero_padding(a);
2 changes: 1 addition & 1 deletion tests/tt_eager/ops/test_transpose_op.cpp
@@ -34,7 +34,7 @@ int main(int argc, char **argv) {
////////////////////////////////////////////////////////////////////////////
// Application Setup
////////////////////////////////////////////////////////////////////////////
-Shape shape = {1, 1, TILE_HEIGHT, TILE_WIDTH};
+tt::tt_metal::LegacyShape shape = {1, 1, TILE_HEIGHT, TILE_WIDTH};
// Allocates a DRAM buffer on device populated with values specified by initialize
Tensor a = tt::numpy::random::random(shape).to(Layout::TILE).to(device);

2 changes: 1 addition & 1 deletion tests/tt_eager/ops/test_transpose_wh_multi_core.cpp
@@ -77,7 +77,7 @@ int main(int argc, char **argv) {
////////////////////////////////////////////////////////////////////////////
// Application Setup
////////////////////////////////////////////////////////////////////////////
-Shape shape = {1, 1, 10*TILE_HEIGHT, 12*TILE_WIDTH};
+tt::tt_metal::LegacyShape shape = {1, 1, 10*TILE_HEIGHT, 12*TILE_WIDTH};
// Allocates a DRAM buffer on device populated with values specified by initialize
Tensor a = tt::numpy::random::random(shape).to(Layout::TILE).to(device);

2 changes: 1 addition & 1 deletion tests/tt_eager/ops/test_transpose_wh_single_core.cpp
@@ -77,7 +77,7 @@ int main(int argc, char **argv) {
////////////////////////////////////////////////////////////////////////////
// Application Setup
////////////////////////////////////////////////////////////////////////////
-Shape shape = {1, 1, 10*TILE_HEIGHT, 12*TILE_WIDTH};
+tt::tt_metal::LegacyShape shape = {1, 1, 10*TILE_HEIGHT, 12*TILE_WIDTH};
// Allocates a DRAM buffer on device populated with values specified by initialize
Tensor a = tt::numpy::random::random(shape).to(Layout::TILE).to(device);
