#14974: Remove numpy/ directory and the namespace in ttnn (#15852)
### Ticket
#14974

### Problem description
Follow-up to #15847: remove the remaining usages of the `numpy` namespace.

### What's changed
* Removed the `ttnn::numpy` namespace and the corresponding directory.
* Moved `.../operations/numpy/functions.hpp` to `.../operations/`; call sites update as sketched below.
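
For reference, a minimal before/after sketch of the rename at a call site (the tensor shape and variable names are illustrative; the include paths and the `ttnn::random::uniform` call are taken from the diff below):

```cpp
// Before this commit: the header lives under operations/numpy/ and the
// helpers sit in the ttnn::numpy namespace.
#include "ttnn/operations/numpy/functions.hpp"

auto tensor_before = ttnn::numpy::random::uniform(
    bfloat16(-1.0f), bfloat16(1.0f), {1, 1, 32, 32}, Layout::TILE);

// After this commit: the header moves up to operations/ and the numpy
// namespace qualifier is dropped. Arguments are unchanged.
#include "ttnn/operations/functions.hpp"

auto tensor_after = ttnn::random::uniform(
    bfloat16(-1.0f), bfloat16(1.0f), {1, 1, 32, 32}, Layout::TILE);
```

Based on the diff, only the include path and the namespace qualifier change; argument lists stay the same.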

### Checklist
- [X] [Post commit CI passes](https://github.com/tenstorrent/tt-metal/actions/runs/12249474071)
- [X] New/Existing tests provide coverage for changes
omilyutin-tt authored Dec 10, 2024
1 parent b32ae29 commit ff50e72
Showing 39 changed files with 134 additions and 153 deletions.
46 changes: 17 additions & 29 deletions tests/tt_eager/integration_tests/test_bert.cpp
@@ -10,7 +10,7 @@
#include "ttnn/operations/normalization/softmax/softmax.hpp"
#include "tt_metal/common/constants.hpp"
#include "tt_metal/host_api.hpp"
#include "ttnn/operations/numpy/functions.hpp"
#include "ttnn/operations/functions.hpp"
#include "ttnn/operations/matmul/matmul.hpp"
#include "ttnn/operations/normalization/layernorm/layernorm.hpp"
#include "ttnn/operations/eltwise/binary/binary.hpp"
@@ -228,89 +228,77 @@ void test_bert() {
std::uint32_t intermediate_size = hidden_size * 4;

auto attention_mask =
-ttnn::numpy::random::uniform(
+ttnn::random::uniform(
bfloat16(-1.0f), bfloat16(1.0f), {batch_size, 1, TILE_HEIGHT, sequence_size}, Layout::TILE)
.to(device, l1_memory_config);

auto parameters = Parameters{};
for (auto encoder_index = 0; encoder_index < num_encoders; encoder_index++) {
parameters.emplace(
fmt::format("fused_qkv_weight_{}", encoder_index),
-ttnn::numpy::random::uniform(
-bfloat16(-1.0f), bfloat16(1.0f), {1, 1, hidden_size, hidden_size * 3}, Layout::TILE)
+ttnn::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, hidden_size, hidden_size * 3}, Layout::TILE)
.to(device, dram_memory_config));
parameters.emplace(
fmt::format("fused_qkv_bias_{}", encoder_index),
-ttnn::numpy::random::uniform(
-bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, hidden_size * 3}, Layout::TILE)
+ttnn::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, hidden_size * 3}, Layout::TILE)
.to(device, dram_memory_config));
parameters.emplace(
fmt::format("selfout_weight_{}", encoder_index),
-ttnn::numpy::random::uniform(
-bfloat16(-1.0f), bfloat16(1.0f), {1, 1, hidden_size, hidden_size}, Layout::TILE)
+ttnn::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, hidden_size, hidden_size}, Layout::TILE)
.to(device, dram_memory_config));
parameters.emplace(
fmt::format("selfout_bias_{}", encoder_index),
-ttnn::numpy::random::uniform(
-bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, hidden_size}, Layout::TILE)
+ttnn::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, hidden_size}, Layout::TILE)
.to(device, dram_memory_config));
parameters.emplace(
fmt::format("attention_layernorm_weight_{}", encoder_index),
-ttnn::numpy::random::uniform(
-bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, TILE_WIDTH}, Layout::ROW_MAJOR)
+ttnn::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, TILE_WIDTH}, Layout::ROW_MAJOR)
.to(device, dram_memory_config));
parameters.emplace(
fmt::format("attention_layernorm_bias_{}", encoder_index),
-ttnn::numpy::random::uniform(
-bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, TILE_WIDTH}, Layout::ROW_MAJOR)
+ttnn::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, TILE_WIDTH}, Layout::ROW_MAJOR)
.to(device, dram_memory_config));
parameters.emplace(
fmt::format("ff1_weight_{}", encoder_index),
-ttnn::numpy::random::uniform(
-bfloat16(-1.0f), bfloat16(1.0f), {1, 1, hidden_size, intermediate_size}, Layout::TILE)
+ttnn::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, hidden_size, intermediate_size}, Layout::TILE)
.to(device, dram_memory_config));
parameters.emplace(
fmt::format("ff1_bias_{}", encoder_index),
-ttnn::numpy::random::uniform(
-bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, intermediate_size}, Layout::TILE)
+ttnn::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, intermediate_size}, Layout::TILE)
.to(device, dram_memory_config));
parameters.emplace(
fmt::format("ff2_weight_{}", encoder_index),
-ttnn::numpy::random::uniform(
-bfloat16(-1.0f), bfloat16(1.0f), {1, 1, intermediate_size, hidden_size}, Layout::TILE)
+ttnn::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, intermediate_size, hidden_size}, Layout::TILE)
.to(device, dram_memory_config));
parameters.emplace(
fmt::format("ff2_bias_{}", encoder_index),
-ttnn::numpy::random::uniform(
-bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, hidden_size}, Layout::TILE)
+ttnn::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, hidden_size}, Layout::TILE)
.to(device, dram_memory_config));
parameters.emplace(
fmt::format("feedforward_layernorm_weight_{}", encoder_index),
-ttnn::numpy::random::uniform(
-bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, TILE_WIDTH}, Layout::ROW_MAJOR)
+ttnn::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, TILE_WIDTH}, Layout::ROW_MAJOR)
.to(device, dram_memory_config));
parameters.emplace(
fmt::format("feedforward_layernorm_bias_{}", encoder_index),
-ttnn::numpy::random::uniform(
-bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, TILE_WIDTH}, Layout::ROW_MAJOR)
+ttnn::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, TILE_WIDTH}, Layout::ROW_MAJOR)
.to(device, dram_memory_config));
};
parameters.emplace(
"qa_head_weight",
-ttnn::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, hidden_size, TILE_WIDTH}, Layout::TILE)
+ttnn::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, hidden_size, TILE_WIDTH}, Layout::TILE)
.to(device, dram_memory_config));
parameters.emplace(
"qa_head_bias",
ttnn::reshape(
-ttnn::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, TILE_WIDTH}, Layout::TILE)
+ttnn::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, TILE_WIDTH}, Layout::TILE)
.to(device, dram_memory_config),
ttnn::Shape{tt::tt_metal::LegacyShape{{1, 1, 1, TILE_WIDTH}, {1, 1, TILE_HEIGHT, TILE_WIDTH}}}));

auto run_bert = [&]() {
tt::log_debug(tt::LogTest, "run_bert started");
auto begin = std::chrono::steady_clock::now();
auto hidden_states =
-ttnn::numpy::random::uniform(
+ttnn::random::uniform(
bfloat16(-1.0f), bfloat16(1.0f), {batch_size, 1, sequence_size, hidden_size}, Layout::TILE)
.to(device, l1_memory_config);
for (auto encoder_index = 0; encoder_index < num_encoders; encoder_index++) {
4 changes: 2 additions & 2 deletions tests/tt_eager/ops/test_average_pool.cpp
@@ -4,7 +4,7 @@

#include "ttnn/operations/pool/global_avg_pool/global_avg_pool.hpp"
#include "ttnn/operations/experimental/auto_format/auto_format.hpp"
#include "ttnn/operations/numpy/functions.hpp"
#include "ttnn/operations/functions.hpp"

#include "ttnn/tensor/tensor.hpp"
#include "common/constants.hpp"
@@ -17,7 +17,7 @@ using tt::tt_metal::Tensor;

Tensor run_avg_pool_2d_resnet(tt::tt_metal::LegacyShape& tensor_shape, Device* device) {
using ttnn::operations::experimental::auto_format::AutoFormat;
-auto input_tensor = ttnn::numpy::random::random(tensor_shape, DataType::BFLOAT16);
+auto input_tensor = ttnn::random::random(tensor_shape, DataType::BFLOAT16);
auto padded_input_shape = AutoFormat::pad_to_tile_shape(tensor_shape, false, false);
Tensor padded_input_tensor = input_tensor;
if (!AutoFormat::check_input_tensor_format(input_tensor, padded_input_shape)) {
12 changes: 6 additions & 6 deletions tests/tt_eager/ops/test_bcast_op.cpp
@@ -8,7 +8,7 @@
#include "ttnn/operations/data_movement/bcast/bcast.hpp"
#include "common/constants.hpp"
#include <magic_enum.hpp>
-#include <ttnn/operations/numpy/functions.hpp>
+#include <ttnn/operations/functions.hpp>

using namespace tt;
using namespace tt_metal;
@@ -49,7 +49,7 @@ int main(int argc, char** argv) {
throw std::runtime_error("Unsupported Dim!");
}

-Tensor a = ttnn::numpy::random::random(input_shape_a).to(Layout::TILE).to(device);
+Tensor a = ttnn::random::random(input_shape_a).to(Layout::TILE).to(device);
Tensor b = ttnn::zeros(
ttnn::Shape({1, 1, TILE_HEIGHT, TILE_WIDTH}), DataType::BFLOAT16, Layout::TILE, *device);

@@ -67,28 +67,28 @@ int main(int argc, char** argv) {
}

{
-Tensor a = ttnn::numpy::random::random({1, 1, 32, 4544}).to(Layout::TILE).to(device);
+Tensor a = ttnn::random::random({1, 1, 32, 4544}).to(Layout::TILE).to(device);
Tensor b = ttnn::zeros(ttnn::Shape({1, 1, 32, 4544}), DataType::BFLOAT16, Layout::TILE, *device);
Tensor c = ttnn::bcast(0, a, b, ttnn::BcastOpMath::MUL, ttnn::BcastOpDim::H);
Tensor d = c.cpu();
}

{
-Tensor a = ttnn::numpy::random::random({1, 1, 32, 4544}).to(Layout::TILE).to(device);
+Tensor a = ttnn::random::random({1, 1, 32, 4544}).to(Layout::TILE).to(device);
Tensor b = ttnn::zeros(ttnn::Shape({1, 1, 32, 4544}), DataType::BFLOAT16, Layout::TILE, *device);
Tensor c = ttnn::bcast(0, a, b, ttnn::BcastOpMath::ADD, ttnn::BcastOpDim::H);
Tensor d = c.cpu();
}

{
-Tensor a = ttnn::numpy::random::random({1, 71, 32, 32}).to(Layout::TILE).to(device);
+Tensor a = ttnn::random::random({1, 71, 32, 32}).to(Layout::TILE).to(device);
Tensor b = ttnn::zeros(ttnn::Shape({1, 1, 32, 32}), DataType::BFLOAT16, Layout::TILE, *device);
Tensor c = ttnn::bcast(0, a, b, ttnn::BcastOpMath::MUL, ttnn::BcastOpDim::HW);
Tensor d = c.cpu();
}

{
-Tensor a = ttnn::numpy::random::random({1, 71, 32, 64}).to(Layout::TILE).to(device);
+Tensor a = ttnn::random::random({1, 71, 32, 64}).to(Layout::TILE).to(device);
Tensor b = ttnn::zeros(ttnn::Shape({1, 1, 32, 32}), DataType::BFLOAT16, Layout::TILE, *device);
Tensor c = ttnn::bcast(0, a, b, ttnn::BcastOpMath::MUL, ttnn::BcastOpDim::HW);
Tensor d = c.cpu();
4 changes: 2 additions & 2 deletions tests/tt_eager/ops/test_bmm_op.cpp
@@ -8,7 +8,7 @@
#include "ttnn/tensor/types.hpp"
#include "ttnn/operations/matmul/device/matmul_op.hpp"
#include "common/constants.hpp"
#include "ttnn/operations/numpy/functions.hpp"
#include "ttnn/operations/functions.hpp"

using namespace tt;
using namespace tt_metal;
@@ -40,7 +40,7 @@ int main(int argc, char** argv) {
ttnn::Shape shapeb1({1, 1, Kt * TILE_HEIGHT, Nt * TILE_WIDTH});

// Allocates a DRAM buffer on device populated with values specified by initialize
-Tensor a = ttnn::numpy::random::random(shapea.value).to(Layout::TILE).to(device);
+Tensor a = ttnn::random::random(shapea.value).to(Layout::TILE).to(device);
Tensor b = ttnn::zeros(shapeb, DataType::BFLOAT16, Layout::TILE, *device);
Tensor b1 = ttnn::zeros(shapeb1, DataType::BFLOAT16, Layout::TILE, *device);

10 changes: 5 additions & 5 deletions tests/tt_eager/ops/test_eltwise_binary_op.cpp
@@ -7,7 +7,7 @@
#include "ttnn/tensor/host_buffer/types.hpp"
#include "ttnn/tensor/tensor.hpp"
#include "ttnn/operations/eltwise/binary/binary.hpp"
#include "ttnn/operations/numpy/functions.hpp"
#include "ttnn/operations/functions.hpp"

using tt::tt_metal::DataType;
using tt::tt_metal::Device;
@@ -37,16 +37,16 @@ Tensor host_function(const Tensor& input_tensor_a, const Tensor& input_tensor_b)
template <auto HostFunction, typename DeviceFunction, typename... Args>
bool run_test(
const tt::tt_metal::LegacyShape& shape, const DeviceFunction& device_function, Device* device, Args... args) {
-auto input_tensor_a = ttnn::numpy::random::random(shape, DataType::BFLOAT16);
-auto input_tensor_b = ttnn::numpy::random::random(shape, DataType::BFLOAT16);
+auto input_tensor_a = ttnn::random::random(shape, DataType::BFLOAT16);
+auto input_tensor_b = ttnn::random::random(shape, DataType::BFLOAT16);

auto host_output = HostFunction(input_tensor_a, input_tensor_b);
auto device_output =
device_function(input_tensor_a.to(Layout::TILE).to(device), input_tensor_b.to(Layout::TILE).to(device))
.cpu()
.to(Layout::ROW_MAJOR);

-return ttnn::numpy::allclose<bfloat16>(host_output, device_output, args...);
+return ttnn::allclose<bfloat16>(host_output, device_output, args...);
}

int main() {
@@ -114,7 +114,7 @@ int main() {

// Allocate a tensor to show that the addresses aren't cached
auto input_tensor =
-ttnn::numpy::random::uniform(bfloat16(0.0f), bfloat16(0.0f), {1, 1, 32, 32}).to(Layout::TILE).to(device);
+ttnn::random::uniform(bfloat16(0.0f), bfloat16(0.0f), {1, 1, 32, 32}).to(Layout::TILE).to(device);

run_binary_ops();

26 changes: 13 additions & 13 deletions tests/tt_eager/ops/test_eltwise_unary_op.cpp
@@ -13,7 +13,7 @@
#include "ttnn/operations/data_movement/pad/pad.hpp"
#include "ttnn/operation.hpp"
#include "tt_metal/host_api.hpp"
#include "ttnn/operations/numpy/functions.hpp"
#include "ttnn/operations/functions.hpp"

using tt::tt_metal::DataType;
using tt::tt_metal::Device;
@@ -58,43 +58,43 @@ Tensor host_function(const Tensor& input_tensor) {

template <ttnn::operations::unary::UnaryOpType unary_op_type, typename... Args>
bool run_test(Device* device, const tt::tt_metal::LegacyShape& shape, float low, float high, Args... args) {
-auto input_tensor = ttnn::numpy::random::uniform(bfloat16(low), bfloat16(high), shape).to(Layout::TILE);
+auto input_tensor = ttnn::random::uniform(bfloat16(low), bfloat16(high), shape).to(Layout::TILE);

using ttnn::operations::unary::UnaryOpType;
using ttnn::operations::unary::UnaryWithParam;

if constexpr (unary_op_type == UnaryOpType::SQRT) {
auto host_output = host_function<::detail::sqrt>(input_tensor);
auto device_output = ttnn::sqrt(input_tensor.to(device)).cpu();
-return ttnn::numpy::allclose<bfloat16>(host_output, device_output, args...);
+return ttnn::allclose<bfloat16>(host_output, device_output, args...);
} else if constexpr (unary_op_type == UnaryOpType::EXP) {
auto host_output = host_function<::detail::exp>(input_tensor);
auto device_output = ttnn::exp(input_tensor.to(device)).cpu();
-return ttnn::numpy::allclose<bfloat16>(host_output, device_output, args...);
+return ttnn::allclose<bfloat16>(host_output, device_output, args...);
} else if constexpr (unary_op_type == UnaryOpType::RECIP) {
auto host_output = host_function<::detail::recip>(input_tensor);
auto device_output = ttnn::reciprocal(input_tensor.to(device)).cpu();
-return ttnn::numpy::allclose<bfloat16>(host_output, device_output, args...);
+return ttnn::allclose<bfloat16>(host_output, device_output, args...);
} else if constexpr (unary_op_type == UnaryOpType::GELU) {
auto host_output = host_function<::detail::gelu>(input_tensor);
auto device_output = ttnn::gelu(input_tensor.to(device)).cpu();
-return ttnn::numpy::allclose<bfloat16>(host_output, device_output, args...);
+return ttnn::allclose<bfloat16>(host_output, device_output, args...);
} else if constexpr (unary_op_type == UnaryOpType::RELU) {
auto host_output = host_function<::detail::relu>(input_tensor);
auto device_output = ttnn::relu(input_tensor.to(device)).cpu();
-return ttnn::numpy::allclose<bfloat16>(host_output, device_output, args...);
+return ttnn::allclose<bfloat16>(host_output, device_output, args...);
} else if constexpr (unary_op_type == UnaryOpType::SIGMOID) {
auto host_output = host_function<::detail::sigmoid>(input_tensor);
auto device_output = ttnn::sigmoid(input_tensor.to(device)).cpu();
-return ttnn::numpy::allclose<bfloat16>(host_output, device_output, args...);
+return ttnn::allclose<bfloat16>(host_output, device_output, args...);
} else if constexpr (unary_op_type == UnaryOpType::LOG) {
auto host_output = host_function<::detail::log>(input_tensor);
auto device_output = ttnn::log(input_tensor.to(device)).cpu();
-return ttnn::numpy::allclose<bfloat16>(host_output, device_output, args...);
+return ttnn::allclose<bfloat16>(host_output, device_output, args...);
} else if constexpr (unary_op_type == UnaryOpType::TANH) {
auto host_output = host_function<::detail::tanh>(input_tensor);
auto device_output = ttnn::tanh(input_tensor.to(device)).cpu();
-return ttnn::numpy::allclose<bfloat16>(host_output, device_output, args...);
+return ttnn::allclose<bfloat16>(host_output, device_output, args...);
}
TT_ASSERT(false, "Unsupported function");
return false;
@@ -111,7 +111,7 @@ void test_operation_infrastructure() {
auto device = tt::tt_metal::CreateDevice(device_id);

auto shape = tt::tt_metal::LegacyShape{1, 1, TILE_HEIGHT, TILE_WIDTH};
-auto input_tensor = ttnn::numpy::random::uniform(bfloat16(0), bfloat16(1), shape).to(Layout::TILE).to(device);
+auto input_tensor = ttnn::random::uniform(bfloat16(0), bfloat16(1), shape).to(Layout::TILE).to(device);

ttnn::operations::unary::operation_attributes_t op_args{
{UnaryWithParam{UnaryOpType::SQRT}},
@@ -139,7 +139,7 @@ void test_shape_padding() {

tt::tt_metal::Array4D input_shape = {1, 1, 13, 18};
tt::tt_metal::Array4D padded_input_shape = {1, 1, TILE_HEIGHT, TILE_WIDTH};
-auto input_tensor = ttnn::numpy::random::uniform(bfloat16(0), bfloat16(1), input_shape);
+auto input_tensor = ttnn::random::uniform(bfloat16(0), bfloat16(1), input_shape);

auto padded_input_tensor = ttnn::pad(input_tensor, padded_input_shape, tt::tt_metal::Array4D({0, 0, 0, 0}), 0);

@@ -251,7 +251,7 @@ void test_program_cache() {

// Allocate a tensor to show that the addresses aren't cached
auto input_tensor =
-ttnn::numpy::random::uniform(bfloat16(0.0f), bfloat16(0.0f), {1, 1, 32, 32}).to(Layout::TILE).to(device);
+ttnn::random::uniform(bfloat16(0.0f), bfloat16(0.0f), {1, 1, 32, 32}).to(Layout::TILE).to(device);

// Program Cache Hit
run_test<UnaryOpType::EXP>(device, {1, 1, TILE_HEIGHT, TILE_WIDTH}, 0.0f, 1.0f, 1e-1f, 1e-5f);
4 changes: 2 additions & 2 deletions tests/tt_eager/ops/test_fold_op.cpp
@@ -5,7 +5,7 @@
#include <algorithm>
#include <functional>
#include <random>
-#include <ttnn/operations/numpy/functions.hpp>
+#include <ttnn/operations/functions.hpp>

#include "ttnn/tensor/tensor.hpp"
#include "ttnn/operations/data_movement/fold/fold.hpp"
@@ -16,7 +16,7 @@ using namespace tt::tt_metal;
using namespace constants;

void run_fold(Device* device, tt::tt_metal::LegacyShape shape) {
-Tensor input_tensor = ttnn::numpy::random::random(shape).to(Layout::ROW_MAJOR).to(device);
+Tensor input_tensor = ttnn::random::random(shape).to(Layout::ROW_MAJOR).to(device);
uint32_t stride_h = 2;
uint32_t stride_w = 2;
uint8_t queue_id = 0;
4 changes: 2 additions & 2 deletions tests/tt_eager/ops/test_layernorm_op.cpp
@@ -5,7 +5,7 @@
#include "tt_metal/host_api.hpp"
#include "ttnn/tensor/tensor.hpp"
#include "ttnn/operations/normalization/layernorm/layernorm.hpp"
-#include <ttnn/operations/numpy/functions.hpp>
+#include <ttnn/operations/functions.hpp>

#include <algorithm>
#include <functional>
Expand All @@ -29,7 +29,7 @@ int main(int argc, char** argv) {
int device_id = 0;
tt_metal::Device* device = tt_metal::CreateDevice(device_id);
tt::tt_metal::LegacyShape shape = {1, 1, TILE_HEIGHT, TILE_WIDTH};
-Tensor a = ttnn::numpy::random::random(shape).to(Layout::TILE).to(device);
+Tensor a = ttnn::random::random(shape).to(Layout::TILE).to(device);
Tensor c = ttnn::layer_norm(a, 1e-4f);
Tensor d = c.cpu();
Tensor host_a = a.cpu(); // Move tensor a to host to validate