#12795: Move numpy functions.hpp (#12817)
* #12795: Move file and Update

* #12795: Change namespace

* #12795: Update numpy namespace in files

* #12795: Update datatype

* #12795: Move to ttnn/cpp/ttnn/operations/numpy

* #12817: Update
VirdhatchaniKN authored Sep 22, 2024
1 parent 31032c7 commit 62aeb39
Showing 39 changed files with 194 additions and 194 deletions.
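
The change is mechanical: the helper header moves from tt_numpy/functions.hpp to ttnn/operations/numpy/functions.hpp, and every call site switches from the tt::numpy namespace to ttnn::numpy; the helpers themselves are unchanged. Below is a minimal sketch of the call-site update, assuming a tt-metal/ttnn build environment and using only calls that appear in the hunks that follow (the helper make_random_tile_tensor is illustrative, not part of the repository):

// Before (removed in this commit):
//   #include "tt_numpy/functions.hpp"
//   Tensor a = tt::numpy::random::random(shape, DataType::BFLOAT16);

// After: the header lives under ttnn/operations/numpy and the namespace follows suit.
#include "ttnn/operations/numpy/functions.hpp"
#include "ttnn/tensor/tensor.hpp"

using tt::tt_metal::DataType;
using tt::tt_metal::Device;
using tt::tt_metal::Layout;
using tt::tt_metal::LegacyShape;
using tt::tt_metal::Tensor;

// Illustrative helper mirroring the updated tests: build a random bfloat16 tensor,
// tilize it, and move it to the device.
Tensor make_random_tile_tensor(const LegacyShape& shape, Device* device) {
    return ttnn::numpy::random::random(shape, DataType::BFLOAT16)
        .to(Layout::TILE)
        .to(device);
}

Both the quoted include of tt_numpy/functions.hpp and the angle-bracket form are redirected to the new path (each keeping its bracket style), as the hunk in test_bcast_op.cpp shows.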
34 changes: 17 additions & 17 deletions tests/tt_eager/integration_tests/test_bert.cpp
@@ -10,7 +10,7 @@
#include "ttnn/operations/normalization/softmax/softmax.hpp"
#include "tt_metal/common/constants.hpp"
#include "tt_metal/host_api.hpp"
-#include "tt_numpy/functions.hpp"
+#include "ttnn/operations/numpy/functions.hpp"
#include "ttnn/operations/matmul/matmul.hpp"
#include "ttnn/operations/normalization/layernorm/layernorm.hpp"
#include "ttnn/operations/eltwise/binary/binary.hpp"
@@ -231,34 +231,34 @@ void test_bert() {
std::uint32_t hidden_size = num_heads * head_size;
std::uint32_t intermediate_size = hidden_size * 4;

-auto attention_mask = tt::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {batch_size, 1, TILE_HEIGHT, sequence_size}, Layout::TILE).to(device, l1_memory_config);
+auto attention_mask = ttnn::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {batch_size, 1, TILE_HEIGHT, sequence_size}, Layout::TILE).to(device, l1_memory_config);

auto parameters = Parameters{};
for (auto encoder_index = 0; encoder_index < num_encoders; encoder_index++) {
-parameters.emplace(fmt::format("fused_qkv_weight_{}", encoder_index), tt::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, hidden_size, hidden_size * 3}, Layout::TILE).to(device, dram_memory_config));
-parameters.emplace(fmt::format("fused_qkv_bias_{}", encoder_index), tt::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, hidden_size * 3}, Layout::TILE).to(device, dram_memory_config));
-parameters.emplace(fmt::format("selfout_weight_{}", encoder_index), tt::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, hidden_size, hidden_size}, Layout::TILE).to(device, dram_memory_config));
-parameters.emplace(fmt::format("selfout_bias_{}", encoder_index), tt::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, hidden_size}, Layout::TILE).to(device, dram_memory_config));
-parameters.emplace(fmt::format("attention_layernorm_weight_{}", encoder_index), tt::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, TILE_WIDTH}, Layout::ROW_MAJOR).to(device, dram_memory_config));
-parameters.emplace(fmt::format("attention_layernorm_bias_{}", encoder_index), tt::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, TILE_WIDTH}, Layout::ROW_MAJOR).to(device, dram_memory_config));
-parameters.emplace(fmt::format("ff1_weight_{}", encoder_index), tt::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, hidden_size, intermediate_size}, Layout::TILE).to(device, dram_memory_config));
-parameters.emplace(fmt::format("ff1_bias_{}", encoder_index), tt::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, intermediate_size}, Layout::TILE).to(device, dram_memory_config));
-parameters.emplace(fmt::format("ff2_weight_{}", encoder_index), tt::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, intermediate_size, hidden_size}, Layout::TILE).to(device, dram_memory_config));
-parameters.emplace(fmt::format("ff2_bias_{}", encoder_index), tt::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, hidden_size}, Layout::TILE).to(device, dram_memory_config));
-parameters.emplace(fmt::format("feedforward_layernorm_weight_{}", encoder_index), tt::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, TILE_WIDTH}, Layout::ROW_MAJOR).to(device, dram_memory_config));
-parameters.emplace(fmt::format("feedforward_layernorm_bias_{}", encoder_index), tt::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, TILE_WIDTH}, Layout::ROW_MAJOR).to(device, dram_memory_config));
+parameters.emplace(fmt::format("fused_qkv_weight_{}", encoder_index), ttnn::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, hidden_size, hidden_size * 3}, Layout::TILE).to(device, dram_memory_config));
+parameters.emplace(fmt::format("fused_qkv_bias_{}", encoder_index), ttnn::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, hidden_size * 3}, Layout::TILE).to(device, dram_memory_config));
+parameters.emplace(fmt::format("selfout_weight_{}", encoder_index), ttnn::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, hidden_size, hidden_size}, Layout::TILE).to(device, dram_memory_config));
+parameters.emplace(fmt::format("selfout_bias_{}", encoder_index), ttnn::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, hidden_size}, Layout::TILE).to(device, dram_memory_config));
+parameters.emplace(fmt::format("attention_layernorm_weight_{}", encoder_index), ttnn::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, TILE_WIDTH}, Layout::ROW_MAJOR).to(device, dram_memory_config));
+parameters.emplace(fmt::format("attention_layernorm_bias_{}", encoder_index), ttnn::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, TILE_WIDTH}, Layout::ROW_MAJOR).to(device, dram_memory_config));
+parameters.emplace(fmt::format("ff1_weight_{}", encoder_index), ttnn::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, hidden_size, intermediate_size}, Layout::TILE).to(device, dram_memory_config));
+parameters.emplace(fmt::format("ff1_bias_{}", encoder_index), ttnn::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, intermediate_size}, Layout::TILE).to(device, dram_memory_config));
+parameters.emplace(fmt::format("ff2_weight_{}", encoder_index), ttnn::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, intermediate_size, hidden_size}, Layout::TILE).to(device, dram_memory_config));
+parameters.emplace(fmt::format("ff2_bias_{}", encoder_index), ttnn::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, hidden_size}, Layout::TILE).to(device, dram_memory_config));
+parameters.emplace(fmt::format("feedforward_layernorm_weight_{}", encoder_index), ttnn::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, TILE_WIDTH}, Layout::ROW_MAJOR).to(device, dram_memory_config));
+parameters.emplace(fmt::format("feedforward_layernorm_bias_{}", encoder_index), ttnn::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, TILE_WIDTH}, Layout::ROW_MAJOR).to(device, dram_memory_config));
};
-parameters.emplace("qa_head_weight", tt::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, hidden_size, TILE_WIDTH}, Layout::TILE).to(device, dram_memory_config));
+parameters.emplace("qa_head_weight", ttnn::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, hidden_size, TILE_WIDTH}, Layout::TILE).to(device, dram_memory_config));
parameters.emplace(
"qa_head_bias",
ttnn::reshape(
-tt::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, TILE_WIDTH}, Layout::TILE).to(device, dram_memory_config),
+ttnn::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {1, 1, TILE_HEIGHT, TILE_WIDTH}, Layout::TILE).to(device, dram_memory_config),
ttnn::Shape{tt::tt_metal::LegacyShape{{1, 1, 1, TILE_WIDTH}, {1, 1, TILE_HEIGHT, TILE_WIDTH}}}));

auto run_bert = [&]() {
tt::log_debug(tt::LogTest, "run_bert started");
auto begin = std::chrono::steady_clock::now();
-auto hidden_states = tt::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {batch_size, 1, sequence_size, hidden_size}, Layout::TILE).to(device, l1_memory_config);
+auto hidden_states = ttnn::numpy::random::uniform(bfloat16(-1.0f), bfloat16(1.0f), {batch_size, 1, sequence_size, hidden_size}, Layout::TILE).to(device, l1_memory_config);
for (auto encoder_index = 0; encoder_index < num_encoders; encoder_index++) {
hidden_states = encoder(std::move(hidden_states), attention_mask, parameters, encoder_index, head_size);
}
4 changes: 2 additions & 2 deletions tests/tt_eager/ops/test_average_pool.cpp
@@ -4,7 +4,7 @@

#include "ttnn/operations/pool/avgpool/avg_pool.hpp"
#include "ttnn/operations/experimental/auto_format/auto_format.hpp"
-#include "tt_numpy/functions.hpp"
+#include "ttnn/operations/numpy/functions.hpp"

#include "ttnn/tensor/tensor.hpp"
#include "common/constants.hpp"
@@ -17,7 +17,7 @@ using tt::tt_metal::LegacyShape;

Tensor run_avg_pool_2d_resnet(tt::tt_metal::LegacyShape& tensor_shape, Device* device) {
using ttnn::operations::experimental::auto_format::AutoFormat;
-auto input_tensor = tt::numpy::random::random(tensor_shape, DataType::BFLOAT16);
+auto input_tensor = ttnn::numpy::random::random(tensor_shape, DataType::BFLOAT16);
auto padded_input_shape = AutoFormat::pad_to_tile_shape(tensor_shape, false, false);
Tensor padded_input_tensor = input_tensor;
if (!AutoFormat::check_input_tensor_format(input_tensor, padded_input_shape)) {
22 changes: 11 additions & 11 deletions tests/tt_eager/ops/test_bcast_op.cpp
@@ -7,7 +7,7 @@
#include "ttnn/operations/data_movement/bcast/bcast.hpp"
#include "common/constants.hpp"
#include "third_party/magic_enum/magic_enum.hpp"
-#include <tt_numpy/functions.hpp>
+#include <ttnn/operations/numpy/functions.hpp>

#include <algorithm>
#include <functional>
@@ -56,8 +56,8 @@ int main(int argc, char **argv) {
throw std::runtime_error("Unsupported Dim!");
}

-Tensor a = tt::numpy::random::random(input_shape_a).to(Layout::TILE).to(device);
-Tensor b = tt::numpy::zeros({1, 1, TILE_HEIGHT, TILE_WIDTH}, DataType::BFLOAT16).to(Layout::TILE).to(device);
+Tensor a = ttnn::numpy::random::random(input_shape_a).to(Layout::TILE).to(device);
+Tensor b = ttnn::numpy::zeros({1, 1, TILE_HEIGHT, TILE_WIDTH}, DataType::BFLOAT16).to(Layout::TILE).to(device);

for (auto bcast_math: magic_enum::enum_values<ttnn::BcastOpMath>()) {
Tensor c = ttnn::bcast(0,a, b, bcast_math, bcast_dim);
@@ -73,29 +73,29 @@ int main(int argc, char **argv) {
}

{
-Tensor a = tt::numpy::random::random({1, 1, 32, 4544}).to(Layout::TILE).to(device);
-Tensor b = tt::numpy::zeros({1, 1, 32, 4544}, DataType::BFLOAT16).to(Layout::TILE).to(device);
+Tensor a = ttnn::numpy::random::random({1, 1, 32, 4544}).to(Layout::TILE).to(device);
+Tensor b = ttnn::numpy::zeros({1, 1, 32, 4544}, DataType::BFLOAT16).to(Layout::TILE).to(device);
Tensor c = ttnn::bcast(0, a, b, ttnn::BcastOpMath::MUL, ttnn::BcastOpDim::H);
Tensor d = c.cpu();
}

{
-Tensor a = tt::numpy::random::random({1, 1, 32, 4544}).to(Layout::TILE).to(device);
-Tensor b = tt::numpy::zeros({1, 1, 32, 4544}, DataType::BFLOAT16).to(Layout::TILE).to(device);
+Tensor a = ttnn::numpy::random::random({1, 1, 32, 4544}).to(Layout::TILE).to(device);
+Tensor b = ttnn::numpy::zeros({1, 1, 32, 4544}, DataType::BFLOAT16).to(Layout::TILE).to(device);
Tensor c = ttnn::bcast(0,a, b, ttnn::BcastOpMath::ADD, ttnn::BcastOpDim::H);
Tensor d = c.cpu();
}

{
-Tensor a = tt::numpy::random::random({1, 71, 32, 32}).to(Layout::TILE).to(device);
-Tensor b = tt::numpy::zeros({1, 1, 32, 32}, DataType::BFLOAT16).to(Layout::TILE).to(device);
+Tensor a = ttnn::numpy::random::random({1, 71, 32, 32}).to(Layout::TILE).to(device);
+Tensor b = ttnn::numpy::zeros({1, 1, 32, 32}, DataType::BFLOAT16).to(Layout::TILE).to(device);
Tensor c = ttnn::bcast(0,a, b, ttnn::BcastOpMath::MUL, ttnn::BcastOpDim::HW);
Tensor d = c.cpu();
}

{
-Tensor a = tt::numpy::random::random({1, 71, 32, 64}).to(Layout::TILE).to(device);
-Tensor b = tt::numpy::zeros({1, 1, 32, 32}, DataType::BFLOAT16).to(Layout::TILE).to(device);
+Tensor a = ttnn::numpy::random::random({1, 71, 32, 64}).to(Layout::TILE).to(device);
+Tensor b = ttnn::numpy::zeros({1, 1, 32, 32}, DataType::BFLOAT16).to(Layout::TILE).to(device);
Tensor c = ttnn::bcast(0,a, b, ttnn::BcastOpMath::MUL, ttnn::BcastOpDim::HW);
Tensor d = c.cpu();
}
8 changes: 4 additions & 4 deletions tests/tt_eager/ops/test_bmm_op.cpp
@@ -6,7 +6,7 @@
#include "ttnn/tensor/tensor.hpp"
#include "ttnn/operations/matmul/device/matmul_op.hpp"
#include "common/constants.hpp"
-#include "tt_numpy/functions.hpp"
+#include "ttnn/operations/numpy/functions.hpp"

#include <algorithm>
#include <functional>
@@ -46,9 +46,9 @@ int main(int argc, char **argv) {
tt::tt_metal::LegacyShape shapeb1 = {1, 1, Kt*TILE_HEIGHT, Nt*TILE_WIDTH};

// Allocates a DRAM buffer on device populated with values specified by initialize
-Tensor a = tt::numpy::random::random(shapea).to(Layout::TILE).to(device);
-Tensor b = tt::numpy::zeros(shapeb, DataType::BFLOAT16).to(Layout::TILE).to(device);
-Tensor b1 = tt::numpy::zeros(shapeb1, DataType::BFLOAT16).to(Layout::TILE).to(device);
+Tensor a = ttnn::numpy::random::random(shapea).to(Layout::TILE).to(device);
+Tensor b = ttnn::numpy::zeros(shapeb, DataType::BFLOAT16).to(Layout::TILE).to(device);
+Tensor b1 = ttnn::numpy::zeros(shapeb1, DataType::BFLOAT16).to(Layout::TILE).to(device);

Tensor mm = ttnn::operations::matmul::matmul(a, b, /*bias=*/std::nullopt,
ttnn::operations::matmul::Matmul{/*program_config=*/std::nullopt, /*bcast_batch=*/std::nullopt,operation::DEFAULT_OUTPUT_MEMORY_CONFIG, /*output_dtype=*/std::nullopt, /*compute_kernel_config=*/std::nullopt, /*untilize_out=*/false, /*user_core_coord=*/std::nullopt, /*user_fused_activation=*/std::nullopt, /*user_run_batched=*/true}).cpu();
10 changes: 5 additions & 5 deletions tests/tt_eager/ops/test_eltwise_binary_op.cpp
@@ -7,7 +7,7 @@
#include "ttnn/tensor/host_buffer/types.hpp"
#include "ttnn/tensor/tensor.hpp"
#include "ttnn/operations/eltwise/binary/binary.hpp"
-#include "tt_numpy/functions.hpp"
+#include "ttnn/operations/numpy/functions.hpp"

using tt::tt_metal::DataType;
using tt::tt_metal::Device;
@@ -32,13 +32,13 @@ Tensor host_function(const Tensor& input_tensor_a, const Tensor& input_tensor_b)

template <auto HostFunction, typename DeviceFunction, typename... Args>
bool run_test(const tt::tt_metal::LegacyShape& shape, const DeviceFunction& device_function, Device* device, Args... args) {
-auto input_tensor_a = tt::numpy::random::random(shape, DataType::BFLOAT16);
-auto input_tensor_b = tt::numpy::random::random(shape, DataType::BFLOAT16);
+auto input_tensor_a = ttnn::numpy::random::random(shape, DataType::BFLOAT16);
+auto input_tensor_b = ttnn::numpy::random::random(shape, DataType::BFLOAT16);

auto host_output = HostFunction(input_tensor_a, input_tensor_b);
auto device_output = device_function(input_tensor_a.to(Layout::TILE).to(device), input_tensor_b.to(Layout::TILE).to(device)).cpu().to(Layout::ROW_MAJOR);

-return tt::numpy::allclose<bfloat16>(host_output, device_output, args...);
+return ttnn::numpy::allclose<bfloat16>(host_output, device_output, args...);
}

int main() {
@@ -108,7 +108,7 @@ int main() {

// Allocate a tensor to show that the addresses aren't cached
auto input_tensor =
-tt::numpy::random::uniform(bfloat16(0.0f), bfloat16(0.0f), {1, 1, 32, 32}).to(Layout::TILE).to(device);
+ttnn::numpy::random::uniform(bfloat16(0.0f), bfloat16(0.0f), {1, 1, 32, 32}).to(Layout::TILE).to(device);

run_binary_ops();

(Diffs for the remaining 34 changed files are not shown.)
