From ff50e72dc477ec40352e27e7124dee71f0631539 Mon Sep 17 00:00:00 2001 From: Oleg Milyutin Date: Tue, 10 Dec 2024 13:00:21 -0500 Subject: [PATCH] #14974: Remove numpy/ directory and the namespace in ttnn (#15852) ### Ticket #14974 ### Problem description Follow-up to https://github.com/tenstorrent/tt-metal/pull/15847: remove the remaining usages of the `numpy` namespace. ### What's changed * Removed the `ttnn::numpy` namespace and the corresponding directory. * Moved `.../operations/numpy/functions.hpp` to `.../operations/`. ### Checklist - [X] [Post commit CI passes](https://github.com/tenstorrent/tt-metal/actions/runs/12249474071) - [X] New/Existing tests provide coverage for changes --- .../tt_eager/integration_tests/test_bert.cpp | 46 +++++++------------ tests/tt_eager/ops/test_average_pool.cpp | 4 +- tests/tt_eager/ops/test_bcast_op.cpp | 12 ++--- tests/tt_eager/ops/test_bmm_op.cpp | 4 +- tests/tt_eager/ops/test_eltwise_binary_op.cpp | 10 ++-- tests/tt_eager/ops/test_eltwise_unary_op.cpp | 26 +++++------ tests/tt_eager/ops/test_fold_op.cpp | 4 +- tests/tt_eager/ops/test_layernorm_op.cpp | 4 +- tests/tt_eager/ops/test_pad_op.cpp | 4 +- .../tt_eager/ops/test_sliding_window_ops.cpp | 6 +-- tests/tt_eager/ops/test_softmax_op.cpp | 4 +- tests/tt_eager/ops/test_tensor_utils.cpp | 4 +- tests/tt_eager/ops/test_tilize_op.cpp | 4 +- .../ops/test_tilize_op_channels_last.cpp | 4 +- .../tt_eager/ops/test_tilize_zero_padding.cpp | 4 +- tests/tt_eager/ops/test_transpose_op.cpp | 4 +- .../ops/test_transpose_wh_multi_core.cpp | 4 +- .../ops/test_transpose_wh_single_core.cpp | 4 +- tests/tt_eager/tensors/test_copy_and_move.cpp | 38 +++++++-------- .../tensors/test_host_device_loopback.cpp | 6 +-- tests/tt_eager/tensors/test_ranks.cpp | 16 +++---- .../tensors/test_raw_host_memory_pointer.cpp | 16 +++---- tests/tt_metal/test_utils/comparison.hpp | 4 +- .../gtests/tensor/test_create_tensor.cpp | 2 +- .../tensor/test_create_tensor_with_layout.cpp | 2 +- tests/ttnn/unit_tests/gtests/test_add.cpp | 3 +- .../gtests/test_multi_cq_multi_dev.cpp | 2 +- .../gtests/test_multiprod_queue.cpp | 2 +- tt_metal/common/bfloat16.hpp | 2 +- ttnn/cpp/ttnn/operations/creation.hpp | 2 +- .../reshape_on_device/reshape.cpp | 2 +- .../data_movement/reshape_view/reshape.cpp | 2 +- .../unary/device/unary_composite_op.cpp | 6 +-- .../eltwise/unary_backward/unary_backward.cpp | 2 +- .../experimental/reduction/argmax/argmax.cpp | 12 ++--- .../ttnn/operations/{numpy => }/functions.hpp | 6 +-- ...ple_bilinear_program_factory_multicore.cpp | 2 +- .../reduction/prod/device/prod_op_all.cpp | 6 +-- .../ttnn/operations/reduction/prod/prod.cpp | 2 +- 39 files changed, 134 insertions(+), 153 deletions(-) rename ttnn/cpp/ttnn/operations/{numpy => }/functions.hpp (99%) diff --git a/tests/tt_eager/integration_tests/test_bert.cpp b/tests/tt_eager/integration_tests/test_bert.cpp index 81e51414ebe..6140860666a 100644 --- a/tests/tt_eager/integration_tests/test_bert.cpp +++ b/tests/tt_eager/integration_tests/test_bert.cpp @@ -10,7 +10,7 @@ #include "ttnn/operations/normalization/softmax/softmax.hpp" #include "tt_metal/common/constants.hpp" #include "tt_metal/host_api.hpp" -#include "ttnn/operations/numpy/functions.hpp" +#include "ttnn/operations/functions.hpp" #include "ttnn/operations/matmul/matmul.hpp" #include "ttnn/operations/normalization/layernorm/layernorm.hpp" #include "ttnn/operations/eltwise/binary/binary.hpp" @@ -228,7 +228,7 @@ void test_bert() { std::uint32_t intermediate_size = hidden_size * 4; auto attention_mask = - ttnn::numpy::random::uniform( +
ttnn::random::uniform( bfloat16(-1.0f), bfloat16(1.0f), {batch_size, 1, TILE_HEIGHT, sequence_size}, Layout::TILE) .to(device, l1_memory_config); @@ -236,73 +236,61 @@ void test_bert() { for (auto encoder_index = 0; encoder_index < num_encoders; encoder_index++) { parameters.emplace( fmt::format("fused_qkv_weight_{}", encoder_index), - ttnn::numpy::random::uniform( - bfloat16(-1.0f), bfloat16(1.0f), {1, 1, hidden_size, hidden_size * 3}, Layout::TILE) + ttnn::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, hidden_size, hidden_size * 3}, Layout::TILE) .to(device, dram_memory_config)); parameters.emplace( fmt::format("fused_qkv_bias_{}", encoder_index), - ttnn::numpy::random::uniform( - bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, hidden_size * 3}, Layout::TILE) + ttnn::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, hidden_size * 3}, Layout::TILE) .to(device, dram_memory_config)); parameters.emplace( fmt::format("selfout_weight_{}", encoder_index), - ttnn::numpy::random::uniform( - bfloat16(-1.0f), bfloat16(1.0f), {1, 1, hidden_size, hidden_size}, Layout::TILE) + ttnn::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, hidden_size, hidden_size}, Layout::TILE) .to(device, dram_memory_config)); parameters.emplace( fmt::format("selfout_bias_{}", encoder_index), - ttnn::numpy::random::uniform( - bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, hidden_size}, Layout::TILE) + ttnn::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, hidden_size}, Layout::TILE) .to(device, dram_memory_config)); parameters.emplace( fmt::format("attention_layernorm_weight_{}", encoder_index), - ttnn::numpy::random::uniform( - bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, TILE_WIDTH}, Layout::ROW_MAJOR) + ttnn::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, TILE_WIDTH}, Layout::ROW_MAJOR) .to(device, dram_memory_config)); parameters.emplace( fmt::format("attention_layernorm_bias_{}", encoder_index), - ttnn::numpy::random::uniform( - bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, TILE_WIDTH}, Layout::ROW_MAJOR) + ttnn::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, TILE_WIDTH}, Layout::ROW_MAJOR) .to(device, dram_memory_config)); parameters.emplace( fmt::format("ff1_weight_{}", encoder_index), - ttnn::numpy::random::uniform( - bfloat16(-1.0f), bfloat16(1.0f), {1, 1, hidden_size, intermediate_size}, Layout::TILE) + ttnn::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, hidden_size, intermediate_size}, Layout::TILE) .to(device, dram_memory_config)); parameters.emplace( fmt::format("ff1_bias_{}", encoder_index), - ttnn::numpy::random::uniform( - bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, intermediate_size}, Layout::TILE) + ttnn::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, intermediate_size}, Layout::TILE) .to(device, dram_memory_config)); parameters.emplace( fmt::format("ff2_weight_{}", encoder_index), - ttnn::numpy::random::uniform( - bfloat16(-1.0f), bfloat16(1.0f), {1, 1, intermediate_size, hidden_size}, Layout::TILE) + ttnn::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, intermediate_size, hidden_size}, Layout::TILE) .to(device, dram_memory_config)); parameters.emplace( fmt::format("ff2_bias_{}", encoder_index), - ttnn::numpy::random::uniform( - bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, hidden_size}, Layout::TILE) + ttnn::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, hidden_size}, Layout::TILE) .to(device, 
dram_memory_config)); parameters.emplace( fmt::format("feedforward_layernorm_weight_{}", encoder_index), - ttnn::numpy::random::uniform( - bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, TILE_WIDTH}, Layout::ROW_MAJOR) + ttnn::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, TILE_WIDTH}, Layout::ROW_MAJOR) .to(device, dram_memory_config)); parameters.emplace( fmt::format("feedforward_layernorm_bias_{}", encoder_index), - ttnn::numpy::random::uniform( - bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, TILE_WIDTH}, Layout::ROW_MAJOR) + ttnn::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, TILE_WIDTH}, Layout::ROW_MAJOR) .to(device, dram_memory_config)); }; parameters.emplace( "qa_head_weight", - ttnn::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, hidden_size, TILE_WIDTH}, Layout::TILE) + ttnn::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, hidden_size, TILE_WIDTH}, Layout::TILE) .to(device, dram_memory_config)); parameters.emplace( "qa_head_bias", ttnn::reshape( - ttnn::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, TILE_WIDTH}, Layout::TILE) + ttnn::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, TILE_WIDTH}, Layout::TILE) .to(device, dram_memory_config), ttnn::Shape{tt::tt_metal::LegacyShape{{1, 1, 1, TILE_WIDTH}, {1, 1, TILE_HEIGHT, TILE_WIDTH}}})); @@ -310,7 +298,7 @@ void test_bert() { tt::log_debug(tt::LogTest, "run_bert started"); auto begin = std::chrono::steady_clock::now(); auto hidden_states = - ttnn::numpy::random::uniform( + ttnn::random::uniform( bfloat16(-1.0f), bfloat16(1.0f), {batch_size, 1, sequence_size, hidden_size}, Layout::TILE) .to(device, l1_memory_config); for (auto encoder_index = 0; encoder_index < num_encoders; encoder_index++) { diff --git a/tests/tt_eager/ops/test_average_pool.cpp b/tests/tt_eager/ops/test_average_pool.cpp index b30f22a898d..76a24f50e04 100644 --- a/tests/tt_eager/ops/test_average_pool.cpp +++ b/tests/tt_eager/ops/test_average_pool.cpp @@ -4,7 +4,7 @@ #include "ttnn/operations/pool/global_avg_pool/global_avg_pool.hpp" #include "ttnn/operations/experimental/auto_format/auto_format.hpp" -#include "ttnn/operations/numpy/functions.hpp" +#include "ttnn/operations/functions.hpp" #include "ttnn/tensor/tensor.hpp" #include "common/constants.hpp" @@ -17,7 +17,7 @@ using tt::tt_metal::Tensor; Tensor run_avg_pool_2d_resnet(tt::tt_metal::LegacyShape& tensor_shape, Device* device) { using ttnn::operations::experimental::auto_format::AutoFormat; - auto input_tensor = ttnn::numpy::random::random(tensor_shape, DataType::BFLOAT16); + auto input_tensor = ttnn::random::random(tensor_shape, DataType::BFLOAT16); auto padded_input_shape = AutoFormat::pad_to_tile_shape(tensor_shape, false, false); Tensor padded_input_tensor = input_tensor; if (!AutoFormat::check_input_tensor_format(input_tensor, padded_input_shape)) { diff --git a/tests/tt_eager/ops/test_bcast_op.cpp b/tests/tt_eager/ops/test_bcast_op.cpp index 05be3303c06..38675a93723 100644 --- a/tests/tt_eager/ops/test_bcast_op.cpp +++ b/tests/tt_eager/ops/test_bcast_op.cpp @@ -8,7 +8,7 @@ #include "ttnn/operations/data_movement/bcast/bcast.hpp" #include "common/constants.hpp" #include -#include +#include using namespace tt; using namespace tt_metal; @@ -49,7 +49,7 @@ int main(int argc, char** argv) { throw std::runtime_error("Unsupported Dim!"); } - Tensor a = ttnn::numpy::random::random(input_shape_a).to(Layout::TILE).to(device); + Tensor a = 
ttnn::random::random(input_shape_a).to(Layout::TILE).to(device); Tensor b = ttnn::zeros( ttnn::Shape({1, 1, TILE_HEIGHT, TILE_WIDTH}), DataType::BFLOAT16, Layout::TILE, *device); @@ -67,28 +67,28 @@ int main(int argc, char** argv) { } { - Tensor a = ttnn::numpy::random::random({1, 1, 32, 4544}).to(Layout::TILE).to(device); + Tensor a = ttnn::random::random({1, 1, 32, 4544}).to(Layout::TILE).to(device); Tensor b = ttnn::zeros(ttnn::Shape({1, 1, 32, 4544}), DataType::BFLOAT16, Layout::TILE, *device); Tensor c = ttnn::bcast(0, a, b, ttnn::BcastOpMath::MUL, ttnn::BcastOpDim::H); Tensor d = c.cpu(); } { - Tensor a = ttnn::numpy::random::random({1, 1, 32, 4544}).to(Layout::TILE).to(device); + Tensor a = ttnn::random::random({1, 1, 32, 4544}).to(Layout::TILE).to(device); Tensor b = ttnn::zeros(ttnn::Shape({1, 1, 32, 4544}), DataType::BFLOAT16, Layout::TILE, *device); Tensor c = ttnn::bcast(0, a, b, ttnn::BcastOpMath::ADD, ttnn::BcastOpDim::H); Tensor d = c.cpu(); } { - Tensor a = ttnn::numpy::random::random({1, 71, 32, 32}).to(Layout::TILE).to(device); + Tensor a = ttnn::random::random({1, 71, 32, 32}).to(Layout::TILE).to(device); Tensor b = ttnn::zeros(ttnn::Shape({1, 1, 32, 32}), DataType::BFLOAT16, Layout::TILE, *device); Tensor c = ttnn::bcast(0, a, b, ttnn::BcastOpMath::MUL, ttnn::BcastOpDim::HW); Tensor d = c.cpu(); } { - Tensor a = ttnn::numpy::random::random({1, 71, 32, 64}).to(Layout::TILE).to(device); + Tensor a = ttnn::random::random({1, 71, 32, 64}).to(Layout::TILE).to(device); Tensor b = ttnn::zeros(ttnn::Shape({1, 1, 32, 32}), DataType::BFLOAT16, Layout::TILE, *device); Tensor c = ttnn::bcast(0, a, b, ttnn::BcastOpMath::MUL, ttnn::BcastOpDim::HW); Tensor d = c.cpu(); diff --git a/tests/tt_eager/ops/test_bmm_op.cpp b/tests/tt_eager/ops/test_bmm_op.cpp index f769870b595..a247e375718 100644 --- a/tests/tt_eager/ops/test_bmm_op.cpp +++ b/tests/tt_eager/ops/test_bmm_op.cpp @@ -8,7 +8,7 @@ #include "ttnn/tensor/types.hpp" #include "ttnn/operations/matmul/device/matmul_op.hpp" #include "common/constants.hpp" -#include "ttnn/operations/numpy/functions.hpp" +#include "ttnn/operations/functions.hpp" using namespace tt; using namespace tt_metal; @@ -40,7 +40,7 @@ int main(int argc, char** argv) { ttnn::Shape shapeb1({1, 1, Kt * TILE_HEIGHT, Nt * TILE_WIDTH}); // Allocates a DRAM buffer on device populated with values specified by initialize - Tensor a = ttnn::numpy::random::random(shapea.value).to(Layout::TILE).to(device); + Tensor a = ttnn::random::random(shapea.value).to(Layout::TILE).to(device); Tensor b = ttnn::zeros(shapeb, DataType::BFLOAT16, Layout::TILE, *device); Tensor b1 = ttnn::zeros(shapeb1, DataType::BFLOAT16, Layout::TILE, *device); diff --git a/tests/tt_eager/ops/test_eltwise_binary_op.cpp b/tests/tt_eager/ops/test_eltwise_binary_op.cpp index 52a8955c859..533ad2bed87 100644 --- a/tests/tt_eager/ops/test_eltwise_binary_op.cpp +++ b/tests/tt_eager/ops/test_eltwise_binary_op.cpp @@ -7,7 +7,7 @@ #include "ttnn/tensor/host_buffer/types.hpp" #include "ttnn/tensor/tensor.hpp" #include "ttnn/operations/eltwise/binary/binary.hpp" -#include "ttnn/operations/numpy/functions.hpp" +#include "ttnn/operations/functions.hpp" using tt::tt_metal::DataType; using tt::tt_metal::Device; @@ -37,8 +37,8 @@ Tensor host_function(const Tensor& input_tensor_a, const Tensor& input_tensor_b) template bool run_test( const tt::tt_metal::LegacyShape& shape, const DeviceFunction& device_function, Device* device, Args... 
args) { - auto input_tensor_a = ttnn::numpy::random::random(shape, DataType::BFLOAT16); - auto input_tensor_b = ttnn::numpy::random::random(shape, DataType::BFLOAT16); + auto input_tensor_a = ttnn::random::random(shape, DataType::BFLOAT16); + auto input_tensor_b = ttnn::random::random(shape, DataType::BFLOAT16); auto host_output = HostFunction(input_tensor_a, input_tensor_b); auto device_output = @@ -46,7 +46,7 @@ bool run_test( .cpu() .to(Layout::ROW_MAJOR); - return ttnn::numpy::allclose(host_output, device_output, args...); + return ttnn::allclose(host_output, device_output, args...); } int main() { @@ -114,7 +114,7 @@ int main() { // Allocate a tensor to show that the addresses aren't cached auto input_tensor = - ttnn::numpy::random::uniform(bfloat16(0.0f), bfloat16(0.0f), {1, 1, 32, 32}).to(Layout::TILE).to(device); + ttnn::random::uniform(bfloat16(0.0f), bfloat16(0.0f), {1, 1, 32, 32}).to(Layout::TILE).to(device); run_binary_ops(); diff --git a/tests/tt_eager/ops/test_eltwise_unary_op.cpp b/tests/tt_eager/ops/test_eltwise_unary_op.cpp index 05efa664dcf..5ca90b2f896 100644 --- a/tests/tt_eager/ops/test_eltwise_unary_op.cpp +++ b/tests/tt_eager/ops/test_eltwise_unary_op.cpp @@ -13,7 +13,7 @@ #include "ttnn/operations/data_movement/pad/pad.hpp" #include "ttnn/operation.hpp" #include "tt_metal/host_api.hpp" -#include "ttnn/operations/numpy/functions.hpp" +#include "ttnn/operations/functions.hpp" using tt::tt_metal::DataType; using tt::tt_metal::Device; @@ -58,7 +58,7 @@ Tensor host_function(const Tensor& input_tensor) { template bool run_test(Device* device, const tt::tt_metal::LegacyShape& shape, float low, float high, Args... args) { - auto input_tensor = ttnn::numpy::random::uniform(bfloat16(low), bfloat16(high), shape).to(Layout::TILE); + auto input_tensor = ttnn::random::uniform(bfloat16(low), bfloat16(high), shape).to(Layout::TILE); using ttnn::operations::unary::UnaryOpType; using ttnn::operations::unary::UnaryWithParam; @@ -66,35 +66,35 @@ bool run_test(Device* device, const tt::tt_metal::LegacyShape& shape, float low, if constexpr (unary_op_type == UnaryOpType::SQRT) { auto host_output = host_function<::detail::sqrt>(input_tensor); auto device_output = ttnn::sqrt(input_tensor.to(device)).cpu(); - return ttnn::numpy::allclose(host_output, device_output, args...); + return ttnn::allclose(host_output, device_output, args...); } else if constexpr (unary_op_type == UnaryOpType::EXP) { auto host_output = host_function<::detail::exp>(input_tensor); auto device_output = ttnn::exp(input_tensor.to(device)).cpu(); - return ttnn::numpy::allclose(host_output, device_output, args...); + return ttnn::allclose(host_output, device_output, args...); } else if constexpr (unary_op_type == UnaryOpType::RECIP) { auto host_output = host_function<::detail::recip>(input_tensor); auto device_output = ttnn::reciprocal(input_tensor.to(device)).cpu(); - return ttnn::numpy::allclose(host_output, device_output, args...); + return ttnn::allclose(host_output, device_output, args...); } else if constexpr (unary_op_type == UnaryOpType::GELU) { auto host_output = host_function<::detail::gelu>(input_tensor); auto device_output = ttnn::gelu(input_tensor.to(device)).cpu(); - return ttnn::numpy::allclose(host_output, device_output, args...); + return ttnn::allclose(host_output, device_output, args...); } else if constexpr (unary_op_type == UnaryOpType::RELU) { auto host_output = host_function<::detail::relu>(input_tensor); auto device_output = ttnn::relu(input_tensor.to(device)).cpu(); - return 
ttnn::numpy::allclose(host_output, device_output, args...); + return ttnn::allclose(host_output, device_output, args...); } else if constexpr (unary_op_type == UnaryOpType::SIGMOID) { auto host_output = host_function<::detail::sigmoid>(input_tensor); auto device_output = ttnn::sigmoid(input_tensor.to(device)).cpu(); - return ttnn::numpy::allclose(host_output, device_output, args...); + return ttnn::allclose(host_output, device_output, args...); } else if constexpr (unary_op_type == UnaryOpType::LOG) { auto host_output = host_function<::detail::log>(input_tensor); auto device_output = ttnn::log(input_tensor.to(device)).cpu(); - return ttnn::numpy::allclose(host_output, device_output, args...); + return ttnn::allclose(host_output, device_output, args...); } else if constexpr (unary_op_type == UnaryOpType::TANH) { auto host_output = host_function<::detail::tanh>(input_tensor); auto device_output = ttnn::tanh(input_tensor.to(device)).cpu(); - return ttnn::numpy::allclose(host_output, device_output, args...); + return ttnn::allclose(host_output, device_output, args...); } TT_ASSERT(false, "Unsupported function"); return false; @@ -111,7 +111,7 @@ void test_operation_infrastructure() { auto device = tt::tt_metal::CreateDevice(device_id); auto shape = tt::tt_metal::LegacyShape{1, 1, TILE_HEIGHT, TILE_WIDTH}; - auto input_tensor = ttnn::numpy::random::uniform(bfloat16(0), bfloat16(1), shape).to(Layout::TILE).to(device); + auto input_tensor = ttnn::random::uniform(bfloat16(0), bfloat16(1), shape).to(Layout::TILE).to(device); ttnn::operations::unary::operation_attributes_t op_args{ {UnaryWithParam{UnaryOpType::SQRT}}, @@ -139,7 +139,7 @@ void test_shape_padding() { tt::tt_metal::Array4D input_shape = {1, 1, 13, 18}; tt::tt_metal::Array4D padded_input_shape = {1, 1, TILE_HEIGHT, TILE_WIDTH}; - auto input_tensor = ttnn::numpy::random::uniform(bfloat16(0), bfloat16(1), input_shape); + auto input_tensor = ttnn::random::uniform(bfloat16(0), bfloat16(1), input_shape); auto padded_input_tensor = ttnn::pad(input_tensor, padded_input_shape, tt::tt_metal::Array4D({0, 0, 0, 0}), 0); @@ -251,7 +251,7 @@ void test_program_cache() { // Allocate a tensor to show that the addresses aren't cached auto input_tensor = - ttnn::numpy::random::uniform(bfloat16(0.0f), bfloat16(0.0f), {1, 1, 32, 32}).to(Layout::TILE).to(device); + ttnn::random::uniform(bfloat16(0.0f), bfloat16(0.0f), {1, 1, 32, 32}).to(Layout::TILE).to(device); // Program Cache Hit run_test(device, {1, 1, TILE_HEIGHT, TILE_WIDTH}, 0.0f, 1.0f, 1e-1f, 1e-5f); diff --git a/tests/tt_eager/ops/test_fold_op.cpp b/tests/tt_eager/ops/test_fold_op.cpp index 5baeb28532e..b5061eed2f3 100644 --- a/tests/tt_eager/ops/test_fold_op.cpp +++ b/tests/tt_eager/ops/test_fold_op.cpp @@ -5,7 +5,7 @@ #include #include #include -#include +#include #include "ttnn/tensor/tensor.hpp" #include "ttnn/operations/data_movement/fold/fold.hpp" @@ -16,7 +16,7 @@ using namespace tt::tt_metal; using namespace constants; void run_fold(Device* device, tt::tt_metal::LegacyShape shape) { - Tensor input_tensor = ttnn::numpy::random::random(shape).to(Layout::ROW_MAJOR).to(device); + Tensor input_tensor = ttnn::random::random(shape).to(Layout::ROW_MAJOR).to(device); uint32_t stride_h = 2; uint32_t stride_w = 2; uint8_t queue_id = 0; diff --git a/tests/tt_eager/ops/test_layernorm_op.cpp b/tests/tt_eager/ops/test_layernorm_op.cpp index c1cc3cda8e5..563a5b101e7 100644 --- a/tests/tt_eager/ops/test_layernorm_op.cpp +++ b/tests/tt_eager/ops/test_layernorm_op.cpp @@ -5,7 +5,7 @@ #include 
"tt_metal/host_api.hpp" #include "ttnn/tensor/tensor.hpp" #include "ttnn/operations/normalization/layernorm/layernorm.hpp" -#include +#include #include #include @@ -29,7 +29,7 @@ int main(int argc, char** argv) { int device_id = 0; tt_metal::Device* device = tt_metal::CreateDevice(device_id); tt::tt_metal::LegacyShape shape = {1, 1, TILE_HEIGHT, TILE_WIDTH}; - Tensor a = ttnn::numpy::random::random(shape).to(Layout::TILE).to(device); + Tensor a = ttnn::random::random(shape).to(Layout::TILE).to(device); Tensor c = ttnn::layer_norm(a, 1e-4f); Tensor d = c.cpu(); Tensor host_a = a.cpu(); // Move tensor a to host to validate diff --git a/tests/tt_eager/ops/test_pad_op.cpp b/tests/tt_eager/ops/test_pad_op.cpp index d15662298ad..7be4bce1314 100644 --- a/tests/tt_eager/ops/test_pad_op.cpp +++ b/tests/tt_eager/ops/test_pad_op.cpp @@ -10,7 +10,7 @@ #include "ttnn/operation.hpp" #include "ttnn/operations/data_movement/pad/pad.hpp" #include "tt_metal/host_api.hpp" -#include "ttnn/operations/numpy/functions.hpp" +#include "ttnn/operations/functions.hpp" using tt::tt_metal::DataType; using tt::tt_metal::Device; @@ -28,7 +28,7 @@ void test_operation_infrastructure() { tt::tt_metal::Array4D input_shape = {1, 1, 18, 13}; tt::tt_metal::Array4D padded_shape = {1, 1, TILE_HEIGHT, TILE_WIDTH}; - auto input_tensor = ttnn::numpy::random::uniform(bfloat16(0), bfloat16(1), input_shape); + auto input_tensor = ttnn::random::uniform(bfloat16(0), bfloat16(1), input_shape); auto output_tensor = ttnn::pad(input_tensor, padded_shape, tt::tt_metal::Array4D({0, 0, 0, 0}), 0); TT_FATAL(output_tensor.get_padded_shape() == padded_shape, "Error"); diff --git a/tests/tt_eager/ops/test_sliding_window_ops.cpp b/tests/tt_eager/ops/test_sliding_window_ops.cpp index 7afbd3c7f89..6b975ede57b 100644 --- a/tests/tt_eager/ops/test_sliding_window_ops.cpp +++ b/tests/tt_eager/ops/test_sliding_window_ops.cpp @@ -11,7 +11,7 @@ #include "ttnn/operations/sliding_window/reference_sliding_window.hpp" #include "ttnn/tensor/tensor.hpp" #include "tt_metal/host_api.hpp" -#include "ttnn/operations/numpy/functions.hpp" +#include "ttnn/operations/functions.hpp" #include "ttnn/tensor/types.hpp" using std::vector; @@ -382,9 +382,9 @@ int main() { tt::tt_metal::LegacyShape filter_tensor_shape = {config.window_hw.first, config.window_hw.second}; Tensor input_padded_tensor = - ttnn::numpy::random::random(input_tensor_shape, DataType::BFLOAT16).to(Layout::ROW_MAJOR).cpu(); + ttnn::random::random(input_tensor_shape, DataType::BFLOAT16).to(Layout::ROW_MAJOR).cpu(); Tensor filter_tensor = - ttnn::numpy::random::random(filter_tensor_shape, DataType::BFLOAT16).to(Layout::ROW_MAJOR).cpu(); + ttnn::random::random(filter_tensor_shape, DataType::BFLOAT16).to(Layout::ROW_MAJOR).cpu(); auto input_padded_tensor_buf = owned_buffer::get_as(input_padded_tensor); auto filter_tensor_buf = owned_buffer::get_as(filter_tensor); diff --git a/tests/tt_eager/ops/test_softmax_op.cpp b/tests/tt_eager/ops/test_softmax_op.cpp index 4a7ae198b06..7233bc397fc 100644 --- a/tests/tt_eager/ops/test_softmax_op.cpp +++ b/tests/tt_eager/ops/test_softmax_op.cpp @@ -5,7 +5,7 @@ #include "tt_metal/host_api.hpp" #include "ttnn/tensor/tensor.hpp" #include "ttnn/operations/normalization/softmax/softmax.hpp" -#include "ttnn/operations/numpy/functions.hpp" +#include "ttnn/operations/functions.hpp" #include #include @@ -16,7 +16,7 @@ using namespace tt::tt_metal; using namespace constants; void run_softmax(Device* device, tt::tt_metal::LegacyShape shape) { - Tensor input_tensor = 
ttnn::numpy::random::random(shape).to(Layout::TILE).to(device); + Tensor input_tensor = ttnn::random::random(shape).to(Layout::TILE).to(device); Tensor device_output_tensor = ttnn::softmax_in_place(input_tensor); Tensor output_tensor = device_output_tensor.cpu(); } diff --git a/tests/tt_eager/ops/test_tensor_utils.cpp b/tests/tt_eager/ops/test_tensor_utils.cpp index 0b6c2e3d376..9a9c1ae3de2 100644 --- a/tests/tt_eager/ops/test_tensor_utils.cpp +++ b/tests/tt_eager/ops/test_tensor_utils.cpp @@ -12,6 +12,7 @@ #include "ttnn/tensor/tensor.hpp" #include "ttnn/tensor/tensor.hpp" #include "ttnn/operations/creation.hpp" +#include "ttnn/operations/functions.hpp" #include "ttnn/tensor/types.hpp" #include "ttnn/tensor/tensor_utils.hpp" @@ -462,8 +463,7 @@ static void test_convert_conv_weight_tensor_to_tiled_layout_block_sharded() { static void test_convert_conv_bias_tensor_to_tiled_layout_block_sharded() { tt::log_info(tt::LogTest, "Running {}", __func__); for (auto i = 0; i < bias_tensor_shape.size(); i++) { - auto input_tensor = - ttnn::numpy::random::random(bias_tensor_shape[i], DataType::BFLOAT16).to(Layout::ROW_MAJOR).cpu(); + auto input_tensor = ttnn::random::random(bias_tensor_shape[i], DataType::BFLOAT16).to(Layout::ROW_MAJOR).cpu(); auto input_buffer = owned_buffer::get_as(input_tensor); auto output_tensor = convert_conv_bias_tensor_to_tiled_layout_block_sharded(input_tensor, shards[i], DataType::BFLOAT16); diff --git a/tests/tt_eager/ops/test_tilize_op.cpp b/tests/tt_eager/ops/test_tilize_op.cpp index c78d37487c8..49531d54a37 100644 --- a/tests/tt_eager/ops/test_tilize_op.cpp +++ b/tests/tt_eager/ops/test_tilize_op.cpp @@ -5,7 +5,7 @@ #include #include #include -#include +#include #include "common/constants.hpp" #include "ttnn/tensor/host_buffer/functions.hpp" @@ -37,7 +37,7 @@ int main(int argc, char** argv) { //////////////////////////////////////////////////////////////////////////// tt::tt_metal::LegacyShape shape = {1, 64, 32, 64}; // Allocates a DRAM buffer on device populated with values specified by initialize - Tensor a = ttnn::numpy::random::random(shape).to(device); + Tensor a = ttnn::random::random(shape).to(device); Tensor b = ttnn::tilize(a); Tensor c = b.cpu(); diff --git a/tests/tt_eager/ops/test_tilize_op_channels_last.cpp b/tests/tt_eager/ops/test_tilize_op_channels_last.cpp index 47dba746653..502bc2b696b 100644 --- a/tests/tt_eager/ops/test_tilize_op_channels_last.cpp +++ b/tests/tt_eager/ops/test_tilize_op_channels_last.cpp @@ -12,7 +12,7 @@ #include "ttnn/tensor/tensor.hpp" #include "ttnn/operations/data_movement/tilize/tilize.hpp" #include "tt_metal/host_api.hpp" -#include "ttnn/operations/numpy/functions.hpp" +#include "ttnn/operations/functions.hpp" using namespace tt; using namespace tt_metal; @@ -37,7 +37,7 @@ int main(int argc, char** argv) { //////////////////////////////////////////////////////////////////////////// tt::tt_metal::LegacyShape shape = {1, 32, 32, 64}; // Allocates a DRAM buffer on device populated with values specified by initialize - Tensor a = ttnn::numpy::random::random(shape).to(device); + Tensor a = ttnn::random::random(shape).to(device); Tensor b = ttnn::tilize(a); Tensor c = b.cpu(); //////////////////////////////////////////////////////////////////////////// diff --git a/tests/tt_eager/ops/test_tilize_zero_padding.cpp b/tests/tt_eager/ops/test_tilize_zero_padding.cpp index 178948d3c18..580bd410295 100644 --- a/tests/tt_eager/ops/test_tilize_zero_padding.cpp +++ b/tests/tt_eager/ops/test_tilize_zero_padding.cpp @@ -12,7 +12,7 @@ #include 
"ttnn/tensor/tensor.hpp" #include "ttnn/operations/data_movement/tilize_with_val_padding/tilize_with_val_padding.hpp" #include "tt_metal/host_api.hpp" -#include "ttnn/operations/numpy/functions.hpp" +#include "ttnn/operations/functions.hpp" using namespace tt; using namespace tt_metal; @@ -37,7 +37,7 @@ int main(int argc, char** argv) { //////////////////////////////////////////////////////////////////////////// tt::tt_metal::LegacyShape shape = {1, 32, 45, 64}; // Allocates a DRAM buffer on device populated with values specified by initialize - Tensor a = ttnn::numpy::random::random(shape).to(device); + Tensor a = ttnn::random::random(shape).to(device); Tensor b = ttnn::tilize_with_zero_padding(a); Tensor c = b.cpu(); //////////////////////////////////////////////////////////////////////////// diff --git a/tests/tt_eager/ops/test_transpose_op.cpp b/tests/tt_eager/ops/test_transpose_op.cpp index bdb4e64273e..72395003657 100644 --- a/tests/tt_eager/ops/test_transpose_op.cpp +++ b/tests/tt_eager/ops/test_transpose_op.cpp @@ -5,7 +5,7 @@ #include "tt_metal/host_api.hpp" #include "ttnn/tensor/tensor.hpp" #include "ttnn/operations/data_movement/transpose/transpose.hpp" -#include +#include #include #include @@ -33,7 +33,7 @@ int main(int argc, char** argv) { //////////////////////////////////////////////////////////////////////////// tt::tt_metal::LegacyShape shape = {1, 1, TILE_HEIGHT, TILE_WIDTH}; // Allocates a DRAM buffer on device populated with values specified by initialize - Tensor a = ttnn::numpy::random::random(shape).to(Layout::TILE).to(device); + Tensor a = ttnn::random::random(shape).to(Layout::TILE).to(device); tt_metal::Tensor c = ttnn::transpose(a, -2, -1); diff --git a/tests/tt_eager/ops/test_transpose_wh_multi_core.cpp b/tests/tt_eager/ops/test_transpose_wh_multi_core.cpp index 3aba3aaef1e..f4957b4b203 100644 --- a/tests/tt_eager/ops/test_transpose_wh_multi_core.cpp +++ b/tests/tt_eager/ops/test_transpose_wh_multi_core.cpp @@ -5,7 +5,7 @@ #include #include #include -#include +#include #include "ttnn/tensor/host_buffer/functions.hpp" #include "ttnn/tensor/host_buffer/types.hpp" @@ -82,7 +82,7 @@ int main(int argc, char** argv) { //////////////////////////////////////////////////////////////////////////// tt::tt_metal::LegacyShape shape = {1, 1, 10 * TILE_HEIGHT, 12 * TILE_WIDTH}; // Allocates a DRAM buffer on device populated with values specified by initialize - Tensor a = ttnn::numpy::random::random(shape).to(Layout::TILE).to(device); + Tensor a = ttnn::random::random(shape).to(Layout::TILE).to(device); tt_metal::Tensor c = ttnn::transpose(a, -2, -1); diff --git a/tests/tt_eager/ops/test_transpose_wh_single_core.cpp b/tests/tt_eager/ops/test_transpose_wh_single_core.cpp index 3aba3aaef1e..f4957b4b203 100644 --- a/tests/tt_eager/ops/test_transpose_wh_single_core.cpp +++ b/tests/tt_eager/ops/test_transpose_wh_single_core.cpp @@ -5,7 +5,7 @@ #include #include #include -#include +#include #include "ttnn/tensor/host_buffer/functions.hpp" #include "ttnn/tensor/host_buffer/types.hpp" @@ -82,7 +82,7 @@ int main(int argc, char** argv) { //////////////////////////////////////////////////////////////////////////// tt::tt_metal::LegacyShape shape = {1, 1, 10 * TILE_HEIGHT, 12 * TILE_WIDTH}; // Allocates a DRAM buffer on device populated with values specified by initialize - Tensor a = ttnn::numpy::random::random(shape).to(Layout::TILE).to(device); + Tensor a = ttnn::random::random(shape).to(Layout::TILE).to(device); tt_metal::Tensor c = ttnn::transpose(a, -2, -1); diff --git 
a/tests/tt_eager/tensors/test_copy_and_move.cpp b/tests/tt_eager/tensors/test_copy_and_move.cpp index 656585f3351..5fb62254db1 100644 --- a/tests/tt_eager/tensors/test_copy_and_move.cpp +++ b/tests/tt_eager/tensors/test_copy_and_move.cpp @@ -10,7 +10,7 @@ #include "ttnn/tensor/tensor.hpp" #include "ttnn/tensor/tensor_impl.hpp" #include "tt_metal/host_api.hpp" -#include "ttnn/operations/numpy/functions.hpp" +#include "ttnn/operations/functions.hpp" using namespace tt; using namespace tt_metal; @@ -21,14 +21,14 @@ bool test_tensor_copy_semantics(Device* device) { tt::tt_metal::LegacyShape single_tile_shape = {1, 1, TILE_HEIGHT, TILE_WIDTH}; // host tensor to host tensor copy constructor - Tensor host_a = ttnn::numpy::random::random(single_tile_shape).to(Layout::TILE); + Tensor host_a = ttnn::random::random(single_tile_shape).to(Layout::TILE); Tensor host_a_copy = host_a; auto host_a_data = owned_buffer::get_as(host_a); auto host_a_copy_data = owned_buffer::get_as(host_a_copy); pass &= host_a_data == host_a_copy_data; // dev tensor to dev tensor copy constructor - Tensor dev_a = ttnn::numpy::random::random(single_tile_shape).to(Layout::TILE).to(device); + Tensor dev_a = ttnn::random::random(single_tile_shape).to(Layout::TILE).to(device); Tensor dev_a_copy = dev_a; auto dev_a_on_host = dev_a.cpu(); auto dev_a_copy_on_host = dev_a_copy.cpu(); @@ -40,14 +40,14 @@ bool test_tensor_copy_semantics(Device* device) { Tensor host_c = ttnn::arange(/*start=*/0, /*stop=*/tt_metal::compute_volume(single_tile_shape), /*step=*/1) .reshape(single_tile_shape) .to(Layout::TILE); - Tensor host_c_copy = ttnn::numpy::random::random(single_tile_shape).to(Layout::TILE); + Tensor host_c_copy = ttnn::random::random(single_tile_shape).to(Layout::TILE); host_c_copy = host_c; auto host_c_data = owned_buffer::get_as(host_c); auto host_c_copy_data = owned_buffer::get_as(host_c_copy); pass &= host_c_data == host_c_copy_data; // host tensor updated with dev tensor copy assignment - Tensor host_d_copy = ttnn::numpy::random::random(single_tile_shape).to(Layout::TILE); + Tensor host_d_copy = ttnn::random::random(single_tile_shape).to(Layout::TILE); host_d_copy = dev_a; pass &= (host_d_copy.storage_type() == StorageType::DEVICE); auto host_d_copy_on_host = host_d_copy.cpu(); @@ -56,7 +56,7 @@ bool test_tensor_copy_semantics(Device* device) { // dev tensor updated with host tensor copy assignment Tensor host_e = ttnn::ones(single_tile_shape, DataType::BFLOAT16, Layout::TILE); - Tensor dev_e_copy = ttnn::numpy::random::random(single_tile_shape).to(Layout::TILE).to(device); + Tensor dev_e_copy = ttnn::random::random(single_tile_shape).to(Layout::TILE).to(device); dev_e_copy = host_e; pass &= (dev_e_copy.storage_type() == StorageType::OWNED); auto host_e_data = owned_buffer::get_as(host_e); @@ -81,7 +81,7 @@ bool test_tensor_move_semantics(Device* device) { bool pass = true; tt::tt_metal::LegacyShape single_tile_shape = {1, 1, TILE_HEIGHT, TILE_WIDTH}; - auto random_tensor = ttnn::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), single_tile_shape); + auto random_tensor = ttnn::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), single_tile_shape); auto bfloat_data = owned_buffer::get_as(random_tensor); // host tensor to host tensor move constructor @@ -100,7 +100,7 @@ bool test_tensor_move_semantics(Device* device) { pass &= dev_a_copy_data == bfloat_data; // host tensor updated with host tensor move assignment - auto random_tensor_three = ttnn::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), single_tile_shape); + auto 
random_tensor_three = ttnn::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), single_tile_shape); auto bfloat_data_three = owned_buffer::get_as(random_tensor_three); Tensor host_c = Tensor(OwnedStorage{bfloat_data_three}, single_tile_shape, DataType::BFLOAT16, Layout::TILE); Tensor host_c_copy = Tensor(dev_a_copy_on_host.get_storage(), single_tile_shape, DataType::BFLOAT16, Layout::TILE); @@ -117,7 +117,7 @@ bool test_tensor_move_semantics(Device* device) { pass &= host_d_copy_data == bfloat_data; // dev tensor updated with host tensor copy assignment - auto random_tensor_four = ttnn::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), single_tile_shape); + auto random_tensor_four = ttnn::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), single_tile_shape); auto bfloat_data_four = owned_buffer::get_as(random_tensor_four); Tensor host_e = Tensor(random_tensor_four.get_storage(), single_tile_shape, DataType::BFLOAT16, Layout::TILE); Tensor dev_e_copy = @@ -128,7 +128,7 @@ bool test_tensor_move_semantics(Device* device) { pass &= dev_e_copy_data == bfloat_data_four; // dev tensor updated with dev tensor copy assignment - auto random_tensor_five = ttnn::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), single_tile_shape); + auto random_tensor_five = ttnn::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), single_tile_shape); auto bfloat_data_five = owned_buffer::get_as(random_tensor_five); Tensor dev_b = Tensor(random_tensor_four.get_storage(), single_tile_shape, DataType::BFLOAT16, Layout::TILE).to(device); @@ -153,32 +153,32 @@ bool test_tensor_deallocate_semantics(Device* device) { MemoryConfig{.memory_layout = TensorMemoryLayout::INTERLEAVED, .buffer_type = BufferType::L1}; // dev tensor allocate, deallocate, reallocate same address DRAM - Tensor dev_a = ttnn::numpy::random::random(single_tile_shape).to(Layout::TILE).to(device, dram_mem_config); + Tensor dev_a = ttnn::random::random(single_tile_shape).to(Layout::TILE).to(device, dram_mem_config); uint32_t address_a = dev_a.buffer()->address(); dev_a.deallocate(); - Tensor dev_b = ttnn::numpy::random::random(single_tile_shape).to(Layout::TILE).to(device, dram_mem_config); + Tensor dev_b = ttnn::random::random(single_tile_shape).to(Layout::TILE).to(device, dram_mem_config); uint32_t address_b = dev_b.buffer()->address(); pass &= address_a == address_b; // dev tensor allocate, allocate, deallocate, reallocate same address DRAM - Tensor dev_c = ttnn::numpy::random::random(single_tile_shape).to(Layout::TILE).to(device, dram_mem_config); + Tensor dev_c = ttnn::random::random(single_tile_shape).to(Layout::TILE).to(device, dram_mem_config); dev_b.deallocate(); - Tensor dev_d = ttnn::numpy::random::random(single_tile_shape).to(Layout::TILE).to(device, dram_mem_config); + Tensor dev_d = ttnn::random::random(single_tile_shape).to(Layout::TILE).to(device, dram_mem_config); uint32_t address_d = dev_d.buffer()->address(); pass &= address_b == address_d; // dev tensor allocate, deallocate, reallocate same address L1 - Tensor dev_e = ttnn::numpy::random::random(single_tile_shape).to(Layout::TILE).to(device, l1_mem_config); + Tensor dev_e = ttnn::random::random(single_tile_shape).to(Layout::TILE).to(device, l1_mem_config); uint32_t address_e = dev_e.buffer()->address(); dev_e.deallocate(); - Tensor dev_f = ttnn::numpy::random::random(single_tile_shape).to(Layout::TILE).to(device, l1_mem_config); + Tensor dev_f = ttnn::random::random(single_tile_shape).to(Layout::TILE).to(device, l1_mem_config); uint32_t address_f = dev_f.buffer()->address(); pass &= 
address_e == address_f; // dev tensor allocate, allocate, deallocate, reallocate same address DRAM - Tensor dev_g = ttnn::numpy::random::random(single_tile_shape).to(Layout::TILE).to(device, l1_mem_config); + Tensor dev_g = ttnn::random::random(single_tile_shape).to(Layout::TILE).to(device, l1_mem_config); dev_f.deallocate(); - Tensor dev_h = ttnn::numpy::random::random(single_tile_shape).to(Layout::TILE).to(device, l1_mem_config); + Tensor dev_h = ttnn::random::random(single_tile_shape).to(Layout::TILE).to(device, l1_mem_config); uint32_t address_h = dev_h.buffer()->address(); pass &= address_f == address_h; @@ -195,7 +195,7 @@ bool test_tensor_deallocate_and_close_device(Device* device) { MemoryConfig{.memory_layout = TensorMemoryLayout::INTERLEAVED, .buffer_type = BufferType::L1}; // dev tensor allocate, deallocate, reallocate same address DRAM - Tensor dev_a = ttnn::numpy::random::random(single_tile_shape).to(Layout::TILE).to(device, dram_mem_config); + Tensor dev_a = ttnn::random::random(single_tile_shape).to(Layout::TILE).to(device, dram_mem_config); uint32_t address_a = dev_a.buffer()->address(); pass &= tt_metal::CloseDevice(device); dev_a.deallocate(); diff --git a/tests/tt_eager/tensors/test_host_device_loopback.cpp b/tests/tt_eager/tensors/test_host_device_loopback.cpp index a49871a4d39..f5680c980ca 100644 --- a/tests/tt_eager/tensors/test_host_device_loopback.cpp +++ b/tests/tt_eager/tensors/test_host_device_loopback.cpp @@ -11,7 +11,7 @@ #include "ttnn/tensor/host_buffer/types.hpp" #include "ttnn/tensor/tensor.hpp" #include "tt_metal/host_api.hpp" -#include "ttnn/operations/numpy/functions.hpp" +#include "ttnn/operations/functions.hpp" using namespace tt; using namespace tt_metal; @@ -21,7 +21,7 @@ bool test_single_tile_single_dram_bank_loopback(Device* device) { bool pass = true; tt::tt_metal::LegacyShape single_tile_shape = {1, 1, TILE_HEIGHT, TILE_WIDTH}; - Tensor host_a = ttnn::numpy::random::random(single_tile_shape).to(Layout::TILE); + Tensor host_a = ttnn::random::random(single_tile_shape).to(Layout::TILE); Tensor device_a = host_a.to(device); Tensor loopbacked_a = device_a.cpu(); auto host_a_data = owned_buffer::get_as(host_a); @@ -35,7 +35,7 @@ bool test_multi_tile_multi_dram_bank_loopback(Device* device) { bool pass = true; tt::tt_metal::LegacyShape multi_tile_shape = {1, 1, 4 * TILE_HEIGHT, 3 * TILE_WIDTH}; - Tensor host_a = ttnn::numpy::random::random(multi_tile_shape).to(Layout::TILE); + Tensor host_a = ttnn::random::random(multi_tile_shape).to(Layout::TILE); Tensor device_a = host_a.to(device); Tensor loopbacked_a = device_a.cpu(); auto host_a_data = owned_buffer::get_as(host_a); diff --git a/tests/tt_eager/tensors/test_ranks.cpp b/tests/tt_eager/tensors/test_ranks.cpp index e01b9bb2261..037a823de23 100644 --- a/tests/tt_eager/tensors/test_ranks.cpp +++ b/tests/tt_eager/tensors/test_ranks.cpp @@ -13,7 +13,7 @@ #include "ttnn/tensor/tensor.hpp" #include "ttnn/tensor/tensor_impl.hpp" #include "tt_metal/host_api.hpp" -#include "ttnn/operations/numpy/functions.hpp" +#include "ttnn/operations/functions.hpp" using namespace tt; using namespace tt_metal; @@ -23,7 +23,7 @@ bool test_2d_tensor(Device* device) { bool pass = true; Shape shape = {30, 30}; - Tensor tensor = ttnn::numpy::random::random(shape); + Tensor tensor = ttnn::random::random(shape); tensor = tensor.pad_to_tile(0.0f); tensor = tensor.to(Layout::TILE); tensor = tensor.to(device); @@ -36,7 +36,7 @@ bool test_3d_tensor(Device* device) { bool pass = true; Shape shape = {3, 30, 30}; - Tensor tensor = 
ttnn::numpy::random::random(shape); + Tensor tensor = ttnn::random::random(shape); tensor = tensor.pad_to_tile(0.0f); tensor = tensor.to(Layout::TILE); tensor = tensor.to(device); @@ -49,7 +49,7 @@ bool test_4d_tensor(Device* device) { bool pass = true; Shape shape = {2, 3, 30, 30}; - Tensor tensor = ttnn::numpy::random::random(shape); + Tensor tensor = ttnn::random::random(shape); tensor = tensor.pad_to_tile(0.0f); tensor = tensor.to(Layout::TILE); tensor = tensor.to(device); @@ -62,7 +62,7 @@ bool test_5d_tensor(Device* device) { bool pass = true; Shape shape = {2, 2, 3, 30, 30}; - Tensor tensor = ttnn::numpy::random::random(shape); + Tensor tensor = ttnn::random::random(shape); tensor = tensor.pad_to_tile(0.0f); tensor = tensor.to(Layout::TILE); tensor = tensor.to(device); @@ -75,7 +75,7 @@ bool test_6d_tensor(Device* device) { bool pass = true; Shape shape = {2, 2, 2, 3, 30, 30}; - Tensor tensor = ttnn::numpy::random::random(shape); + Tensor tensor = ttnn::random::random(shape); tensor = tensor.pad_to_tile(0.0f); tensor = tensor.to(Layout::TILE); tensor = tensor.to(device); @@ -88,7 +88,7 @@ bool test_7d_tensor(Device* device) { bool pass = true; Shape shape = {2, 2, 2, 2, 3, 30, 30}; - Tensor tensor = ttnn::numpy::random::random(shape); + Tensor tensor = ttnn::random::random(shape); tensor = tensor.pad_to_tile(0.0f); tensor = tensor.to(Layout::TILE); tensor = tensor.to(device); @@ -101,7 +101,7 @@ bool test_8d_tensor(Device* device) { bool pass = true; Shape shape = {2, 2, 2, 2, 2, 3, 30, 30}; - Tensor tensor = ttnn::numpy::random::random(shape); + Tensor tensor = ttnn::random::random(shape); tensor = tensor.pad_to_tile(0.0f); tensor = tensor.to(Layout::TILE); tensor = tensor.to(device); diff --git a/tests/tt_eager/tensors/test_raw_host_memory_pointer.cpp b/tests/tt_eager/tensors/test_raw_host_memory_pointer.cpp index d29570a1e66..e3762d2ae29 100644 --- a/tests/tt_eager/tensors/test_raw_host_memory_pointer.cpp +++ b/tests/tt_eager/tensors/test_raw_host_memory_pointer.cpp @@ -15,7 +15,7 @@ #include "ttnn/operations/eltwise/binary/binary.hpp" #include "ttnn/operations/eltwise/unary/unary.hpp" #include "tt_metal/host_api.hpp" -#include "ttnn/operations/numpy/functions.hpp" +#include "ttnn/operations/functions.hpp" /* @@ -42,22 +42,18 @@ */ -namespace numpy { - template -struct ndarray { +struct NDArray { tt::tt_metal::LegacyShape shape; void* data; - ndarray(tt::tt_metal::LegacyShape shape) : + NDArray(tt::tt_metal::LegacyShape shape) : shape(shape), data(malloc(tt::tt_metal::compute_volume(shape) * sizeof(DataType))) {} - ~ndarray() { free(data); } + ~NDArray() { free(data); } std::size_t size() const { return tt::tt_metal::compute_volume(shape); } }; -} // namespace numpy - void test_raw_host_memory_pointer() { using tt::tt_metal::BorrowedStorage; using tt::tt_metal::DataType; @@ -81,7 +77,7 @@ void test_raw_host_memory_pointer() { /* Borrow Data from Numpy Start */ // Create some - auto a_np_array = numpy::ndarray(shape); + auto a_np_array = NDArray(shape); void* a_np_array_data = a_np_array.data; auto on_creation_callback = [] {}; auto on_destruction_callback = [] {}; @@ -154,7 +150,7 @@ void test_raw_host_memory_pointer() { free(storage_of_alternative_tensor_for_printing); /* Alternative Way to Print End */ - auto d_np_array = numpy::ndarray(shape); + auto d_np_array = NDArray(shape); void* d_np_array_data = d_np_array.data; Tensor d_cpu = Tensor( BorrowedStorage{ diff --git a/tests/tt_metal/test_utils/comparison.hpp b/tests/tt_metal/test_utils/comparison.hpp index 
8d785ae12ba..c9a70212d7e 100644 --- a/tests/tt_metal/test_utils/comparison.hpp +++ b/tests/tt_metal/test_utils/comparison.hpp @@ -20,8 +20,8 @@ namespace test_utils { //! to_packed() - get packed (into an integral type that is of the bitwidth specified by SIZEOF) //! Constructor(float in) - constructor with a float as the initializer //! Constructor(uint32_t in) - constructor with a uint32_t as the initializer -- only lower bits needed - -// this follows the implementation of numpy::is_close +// +// this follows the implementation of numpy's is_close template bool is_close(const ValueType a, const ValueType b, float rtol = 0.01f, float atol = 0.001f) { float af = 0.0f; diff --git a/tests/ttnn/unit_tests/gtests/tensor/test_create_tensor.cpp b/tests/ttnn/unit_tests/gtests/tensor/test_create_tensor.cpp index 9103b27b098..e98fbf993bd 100644 --- a/tests/ttnn/unit_tests/gtests/tensor/test_create_tensor.cpp +++ b/tests/ttnn/unit_tests/gtests/tensor/test_create_tensor.cpp @@ -9,7 +9,7 @@ #include "ttnn/device.hpp" #include "ttnn/operations/core/core.hpp" #include "ttnn/async_runtime.hpp" -#include "ttnn/operations/numpy/functions.hpp" +#include "ttnn/operations/functions.hpp" #include "tt_metal/common/logger.hpp" #include "common_tensor_test_utils.hpp" diff --git a/tests/ttnn/unit_tests/gtests/tensor/test_create_tensor_with_layout.cpp b/tests/ttnn/unit_tests/gtests/tensor/test_create_tensor_with_layout.cpp index 4e413a576a2..c7a3f077c73 100644 --- a/tests/ttnn/unit_tests/gtests/tensor/test_create_tensor_with_layout.cpp +++ b/tests/ttnn/unit_tests/gtests/tensor/test_create_tensor_with_layout.cpp @@ -8,7 +8,7 @@ #include "ttnn/device.hpp" #include "ttnn/operations/core/core.hpp" #include "ttnn/async_runtime.hpp" -#include "ttnn/operations/numpy/functions.hpp" +#include "ttnn/operations/functions.hpp" #include "tt_metal/common/logger.hpp" #include "ttnn/tensor/tensor.hpp" diff --git a/tests/ttnn/unit_tests/gtests/test_add.cpp b/tests/ttnn/unit_tests/gtests/test_add.cpp index 579aa08e4bd..84fda89c066 100644 --- a/tests/ttnn/unit_tests/gtests/test_add.cpp +++ b/tests/ttnn/unit_tests/gtests/test_add.cpp @@ -36,8 +36,7 @@ TEST_P(Add1DTensorAndScalarFixture, AddsScalarCorrectly) { const auto expected_tensor = ttnn::operations::creation::full(shape, param.scalar, DataType::BFLOAT16, ttnn::TILE_LAYOUT, device); TT_FATAL( - ttnn::numpy::allclose<::bfloat16>(ttnn::from_device(expected_tensor), ttnn::from_device(output_tensor)), - "Error"); + ttnn::allclose<::bfloat16>(ttnn::from_device(expected_tensor), ttnn::from_device(output_tensor)), "Error"); } ttnn::close_device(device); } diff --git a/tests/ttnn/unit_tests/gtests/test_multi_cq_multi_dev.cpp b/tests/ttnn/unit_tests/gtests/test_multi_cq_multi_dev.cpp index 5da2cde8f93..c15631eba3e 100644 --- a/tests/ttnn/unit_tests/gtests/test_multi_cq_multi_dev.cpp +++ b/tests/ttnn/unit_tests/gtests/test_multi_cq_multi_dev.cpp @@ -9,7 +9,7 @@ #include "ttnn/operations/eltwise/unary/unary.hpp" #include "common/bfloat16.hpp" #include "ttnn/async_runtime.hpp" -#include "ttnn/operations/numpy/functions.hpp" +#include "ttnn/operations/functions.hpp" #include "tt_metal/impl/event/event.hpp" #include diff --git a/tests/ttnn/unit_tests/gtests/test_multiprod_queue.cpp b/tests/ttnn/unit_tests/gtests/test_multiprod_queue.cpp index 038401cd0ff..6a4a1bc58ab 100644 --- a/tests/ttnn/unit_tests/gtests/test_multiprod_queue.cpp +++ b/tests/ttnn/unit_tests/gtests/test_multiprod_queue.cpp @@ -9,7 +9,7 @@ #include "ttnn/operations/eltwise/binary/binary.hpp" #include "common/bfloat16.hpp" 
#include "ttnn/async_runtime.hpp" -#include "ttnn/operations/numpy/functions.hpp" +#include "ttnn/operations/functions.hpp" #include "tt_metal/impl/event/event.hpp" #include #include diff --git a/tt_metal/common/bfloat16.hpp b/tt_metal/common/bfloat16.hpp index e81d5059e6b..c03a58513cf 100644 --- a/tt_metal/common/bfloat16.hpp +++ b/tt_metal/common/bfloat16.hpp @@ -340,7 +340,7 @@ inline bool equal_within_n_sig_figs(float a, float b, int n) { inline bool equal_within_absolute_tolerance(float a, float b, float tol) { return std::abs(a - b) < tol; } -// this follows the implementation of numpy::is_close +// this follows the implementation of numpy's is_close inline bool is_close(float a, float b, float rtol = 0.01f, float atol = 0.001f) { // the idea is near zero we want absolute tolerance since relative doesn't make sense // (consider 1e-6f and 1.1e-6f) diff --git a/ttnn/cpp/ttnn/operations/creation.hpp b/ttnn/cpp/ttnn/operations/creation.hpp index 3267e2dab29..e7f5a1198f4 100644 --- a/ttnn/cpp/ttnn/operations/creation.hpp +++ b/ttnn/cpp/ttnn/operations/creation.hpp @@ -13,7 +13,7 @@ #include "ttnn/decorators.hpp" #include "ttnn/distributed/types.hpp" #include "ttnn/operations/eltwise/unary/unary.hpp" -#include "ttnn/operations/numpy/functions.hpp" +#include "ttnn/operations/functions.hpp" #include "ttnn/any_device.hpp" #include "ttnn/tensor/tensor.hpp" #include "ttnn/tensor/tensor_utils.hpp" diff --git a/ttnn/cpp/ttnn/operations/data_movement/reshape_on_device/reshape.cpp b/ttnn/cpp/ttnn/operations/data_movement/reshape_on_device/reshape.cpp index 50b069c43c2..0910eb284cf 100644 --- a/ttnn/cpp/ttnn/operations/data_movement/reshape_on_device/reshape.cpp +++ b/ttnn/cpp/ttnn/operations/data_movement/reshape_on_device/reshape.cpp @@ -6,7 +6,7 @@ #include "ttnn/run_operation.hpp" #include "reshape.hpp" #include "tt_metal/common/constants.hpp" -#include +#include #include "ttnn/operations/experimental/auto_format/auto_format.hpp" #include "ttnn/tensor/tensor_utils.hpp" #include "device/reshape_op.hpp" diff --git a/ttnn/cpp/ttnn/operations/data_movement/reshape_view/reshape.cpp b/ttnn/cpp/ttnn/operations/data_movement/reshape_view/reshape.cpp index 904fdf88a1f..eddb977d02b 100644 --- a/ttnn/cpp/ttnn/operations/data_movement/reshape_view/reshape.cpp +++ b/ttnn/cpp/ttnn/operations/data_movement/reshape_view/reshape.cpp @@ -9,7 +9,7 @@ #include "reshape_common.hpp" #include "tt_metal/common/constants.hpp" #include -#include +#include #include "ttnn/operations/experimental/auto_format/auto_format.hpp" #include "ttnn/tensor/tensor_utils.hpp" #include "ttnn/cpp/ttnn/operations/data_movement/reshape_on_device/reshape.hpp" diff --git a/ttnn/cpp/ttnn/operations/eltwise/unary/device/unary_composite_op.cpp b/ttnn/cpp/ttnn/operations/eltwise/unary/device/unary_composite_op.cpp index 7105ccf31cc..a30bdc2cc5e 100644 --- a/ttnn/cpp/ttnn/operations/eltwise/unary/device/unary_composite_op.cpp +++ b/ttnn/cpp/ttnn/operations/eltwise/unary/device/unary_composite_op.cpp @@ -12,7 +12,7 @@ #include "tt_metal/common/bfloat16.hpp" #include "ttnn/operations/data_movement/reshape_on_device/reshape.hpp" #include "ttnn/operations/data_movement/bcast/bcast.hpp" -#include "ttnn/operations/numpy/functions.hpp" +#include "ttnn/operations/functions.hpp" #include "ttnn/operations/data_movement/slice/slice.hpp" #include "ttnn/operations/eltwise/unary/unary_composite.hpp" #include "ttnn/operations/eltwise/binary/binary_composite.hpp" @@ -721,7 +721,7 @@ Tensor _swiglu(const Tensor& input_a, int32_t dim, const std::optional& 
output_mem_config) { - Tensor index_l = numpy::index_tril<::bfloat16>( + Tensor index_l = ttnn::index_tril<::bfloat16>( input_a.get_legacy_shape(), diag, DataType::BFLOAT16, @@ -733,7 +733,7 @@ Tensor _tril(const Tensor& input_a, int32_t diag, const std::optional& output_mem_config) { - Tensor index_u = numpy::index_triu<::bfloat16>( + Tensor index_u = ttnn::index_triu<::bfloat16>( input_a.get_legacy_shape(), diag, DataType::BFLOAT16, diff --git a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward.cpp b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward.cpp index a6a9e50998b..f6f8db7c40c 100644 --- a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward.cpp +++ b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward.cpp @@ -1863,7 +1863,7 @@ std::vector ExecuteUnaryBackwardProd::invoke( if (all_dimensions == true) { Tensor temp = ttnn::multiply( prod_result, grad, std::nullopt, output_memory_config); // result is stored in the first position - Tensor fill_tensor = numpy::fill_first_val_into_tensor<::bfloat16>( + Tensor fill_tensor = ttnn::fill_first_val_into_tensor<::bfloat16>( temp, temp.get_dtype(), temp.get_layout(), temp.device(), output_memory_config); Tensor all_dimension_result = ttnn::multiply( ttnn::reciprocal(input, output_memory_config), fill_tensor, std::nullopt, output_memory_config); diff --git a/ttnn/cpp/ttnn/operations/experimental/reduction/argmax/argmax.cpp b/ttnn/cpp/ttnn/operations/experimental/reduction/argmax/argmax.cpp index ec0f2eae610..205198c1d31 100644 --- a/ttnn/cpp/ttnn/operations/experimental/reduction/argmax/argmax.cpp +++ b/ttnn/cpp/ttnn/operations/experimental/reduction/argmax/argmax.cpp @@ -21,7 +21,7 @@ Tensor create_mask(const Tensor& input_a, const std::optional& out return input_a; } float t_inf = -std::numeric_limits::infinity(); - Tensor masked_input = numpy::mask_padded_input<::bfloat16>(padded_shape, unpadded_shape, DataType::BFLOAT16); + Tensor masked_input = ttnn::mask_padded_input<::bfloat16>(padded_shape, unpadded_shape, DataType::BFLOAT16); masked_input = ttnn::where(masked_input, input_a, t_inf, output_mem_config.value()); return masked_input; } @@ -49,12 +49,12 @@ Tensor ArgmaxOperation::invoke( bool is_width = (dim == (input_shape.rank() - 1)); Tensor max_val = ttnn::max(input_a, (int)dim, true, output_memory_config); Tensor max_tensor = ttnn::zeros_like(input_a); - Tensor tindex = numpy::index_width<::bfloat16>( + Tensor tindex = ttnn::index_width<::bfloat16>( input_shape, DataType::BFLOAT16, Layout::TILE, input_a.device(), output_memory_config); if (is_width) { max_tensor = ttnn::add(max_tensor, max_val, std::nullopt, output_memory_config); } else { - tindex = numpy::index_height<::bfloat16>( + tindex = ttnn::index_height<::bfloat16>( input_shape, DataType::BFLOAT16, Layout::TILE, input_a.device(), output_memory_config); max_tensor = ttnn::add(max_tensor, max_val, std::nullopt, output_memory_config); } @@ -93,10 +93,10 @@ Tensor ArgmaxOperation::invoke( concat_out = ttnn::reshape(concat_out, input_a.get_shape()); Tensor cmp_results = ttnn::eq(input_a, concat_out, std::nullopt, output_memory_config); concat_out.deallocate(); - Tensor tindex = numpy::index_channel<::bfloat16>( + Tensor tindex = ttnn::index_channel<::bfloat16>( input_shape, DataType::BFLOAT16, Layout::TILE, input_a.device(), output_memory_config); if (!is_channel) { - tindex = numpy::index_batch<::bfloat16>( + tindex = ttnn::index_batch<::bfloat16>( input_shape, DataType::BFLOAT16, Layout::TILE, input_a.device(), 
output_memory_config); } tindex = tindex.to(input_a.device()); @@ -119,7 +119,7 @@ Tensor ArgmaxOperation::invoke( } // TODO: Fix the index generation code. With the fix the code will work for argmax that return entire // maximum value index - Tensor tindex = numpy::index_all<::bfloat16>( + Tensor tindex = ttnn::index_all<::bfloat16>( input_shape, DataType::BFLOAT16, Layout::TILE, input_a.device(), output_memory_config); Tensor max_val = ttnn::max(input_a, std::nullopt, true, output_memory_config); Tensor max_tensor = ttnn::zeros_like(input_a); diff --git a/ttnn/cpp/ttnn/operations/numpy/functions.hpp b/ttnn/cpp/ttnn/operations/functions.hpp similarity index 99% rename from ttnn/cpp/ttnn/operations/numpy/functions.hpp rename to ttnn/cpp/ttnn/operations/functions.hpp index 51a3668eed1..ed0d598e990 100644 --- a/ttnn/cpp/ttnn/operations/numpy/functions.hpp +++ b/ttnn/cpp/ttnn/operations/functions.hpp @@ -17,8 +17,6 @@ namespace ttnn { -namespace numpy { - using tt::tt_metal::DataType; using tt::tt_metal::Device; using tt::tt_metal::Layout; @@ -554,12 +552,12 @@ static bool allclose(const Tensor& tensor_a, const Tensor& tensor_b, Args... arg auto tensor_b_buffer = tt::tt_metal::owned_buffer::get_as(tensor_b); for (int index = 0; index < tensor_a_buffer.size(); index++) { - if (not detail::nearly_equal(tensor_a_buffer[index], tensor_b_buffer[index], args...)) { + using ::ttnn::detail::nearly_equal; + if (not nearly_equal(tensor_a_buffer[index], tensor_b_buffer[index], args...)) { return false; } } return true; } -} // namespace numpy } // namespace ttnn diff --git a/ttnn/cpp/ttnn/operations/pool/upsample/device/upsample_bilinear_program_factory_multicore.cpp b/ttnn/cpp/ttnn/operations/pool/upsample/device/upsample_bilinear_program_factory_multicore.cpp index 7c1903d8d63..1f07bc081f5 100644 --- a/ttnn/cpp/ttnn/operations/pool/upsample/device/upsample_bilinear_program_factory_multicore.cpp +++ b/ttnn/cpp/ttnn/operations/pool/upsample/device/upsample_bilinear_program_factory_multicore.cpp @@ -15,7 +15,7 @@ #include "ttnn/operations/reduction/generic/device/reduce_op.hpp" // for reduce_op_utils #include "tt_metal/tt_stl/reflection.hpp" -#include "ttnn/operations/numpy/functions.hpp" +#include "ttnn/operations/functions.hpp" #include "ttnn/operations/sliding_window/sliding_window.hpp" #include "ttnn/operations/sliding_window/halo/halo.hpp" diff --git a/ttnn/cpp/ttnn/operations/reduction/prod/device/prod_op_all.cpp b/ttnn/cpp/ttnn/operations/reduction/prod/device/prod_op_all.cpp index ca0e4288dd5..7603863c7d9 100644 --- a/ttnn/cpp/ttnn/operations/reduction/prod/device/prod_op_all.cpp +++ b/ttnn/cpp/ttnn/operations/reduction/prod/device/prod_op_all.cpp @@ -8,7 +8,7 @@ #include "prod_op_all.hpp" #include "ttnn/operations/eltwise/unary/unary.hpp" #include "tt_metal/common/constants.hpp" -#include +#include #include "tt_metal/host_api.hpp" #include "tt_metal/tools/profiler/op_profiler.hpp" @@ -45,11 +45,11 @@ Tensor prod_all(const Tensor& input, const MemoryConfig& output_mem_config) { operation::run(Prod_op{.output_mem_config = output_mem_config}, {input}).at(0), output_mem_config); auto arch_env = tt_ClusterDescriptor::detect_arch((chip_id_t)0); if (arch_env == tt::ARCH::WORMHOLE_B0) { - return ttnn::numpy::prod_result_computation_WH_B0( + return ttnn::prod_result_computation_WH_B0( result, result.get_dtype(), result.get_layout(), result.device(), output_mem_config); } // else --> GS Arch - return ttnn::numpy::prod_result_computation_GS( + return ttnn::prod_result_computation_GS( result, 
result.get_dtype(), result.get_layout(), result.device(), output_mem_config); } diff --git a/ttnn/cpp/ttnn/operations/reduction/prod/prod.cpp b/ttnn/cpp/ttnn/operations/reduction/prod/prod.cpp index 3593bf99eab..ae3307c6dd7 100644 --- a/ttnn/cpp/ttnn/operations/reduction/prod/prod.cpp +++ b/ttnn/cpp/ttnn/operations/reduction/prod/prod.cpp @@ -9,7 +9,7 @@ #include "ttnn/cpp/ttnn/operations/creation.hpp" #include "ttnn/operations/data_movement/slice/slice.hpp" #include "ttnn/operations/data_movement/permute/permute.hpp" -#include "ttnn/operations/numpy/functions.hpp" +#include "ttnn/operations/functions.hpp" #include "ttnn/types.hpp" #include "ttnn/common/constants.hpp"