Commit 306c5db

#9874: Update files

VirdhatchaniKN authored and Aswinmcw committed Jul 10, 2024
1 parent 951e91c commit 306c5db
Showing 20 changed files with 29 additions and 48 deletions.
6 changes: 0 additions & 6 deletions docs/source/ttnn/ttnn/ttnn/binary_eq_bw.rst

This file was deleted.

6 changes: 6 additions & 0 deletions docs/source/ttnn/ttnn/ttnn/eq_bw.rst
@@ -0,0 +1,6 @@
.. _ttnn.eq_bw:

ttnn.eq_bw
#################

.. autofunction:: ttnn.eq_bw
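
A minimal usage sketch for the new ttnn.eq_bw binding (not part of this commit). It assumes the (grad, input, scalar) argument order used by the other unary backward ops in this change, the data_gen_with_range/compare_pcc helpers imported by the test files below, and a hypothetical 1x1x32x32 shape on device 0.

    import torch
    import ttnn
    from tests.tt_eager.python_api_testing.unit_testing.backward_ops.utility_funcs import (
        data_gen_with_range,
        compare_pcc,
    )

    device = ttnn.open_device(device_id=0)

    # Torch reference tensors and their on-device ttnn counterparts, as in the unit tests.
    in_data, input_tensor = data_gen_with_range(torch.Size([1, 1, 32, 32]), -100, 100, device)
    grad_data, grad_tensor = data_gen_with_range(torch.Size([1, 1, 32, 32]), -100, 100, device)

    # Backward of the equality comparison against a scalar: grad first, then input, then the scalar.
    tt_grads = ttnn.eq_bw(grad_tensor, input_tensor, 1.0)

    # A comparison has zero derivative, so the golden input gradient is all zeros.
    golden = [torch.zeros_like(in_data)]
    passed = compare_pcc(tt_grads, golden)

    ttnn.close_device(device)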
6 changes: 0 additions & 6 deletions docs/source/ttnn/ttnn/ttnn/unary_eq_bw.rst

This file was deleted.

@@ -4,7 +4,6 @@

import torch
import pytest
import tt_lib
import ttnn
from tests.tt_eager.python_api_testing.unit_testing.backward_ops.utility_funcs import data_gen_with_range, compare_pcc

@@ -4,7 +4,6 @@

import torch
import pytest
import tt_lib
import ttnn
from tests.tt_eager.python_api_testing.unit_testing.backward_ops.utility_funcs import data_gen_with_range, compare_pcc

@@ -4,7 +4,6 @@

import torch
import pytest
import tt_lib
import ttnn
from tests.tt_eager.python_api_testing.unit_testing.backward_ops.utility_funcs import data_gen_with_range, compare_pcc

@@ -50,9 +49,7 @@ def test_bw_binary_assign(input_shapes, device):
grad_data, grad_tensor = data_gen_with_range(input_shapes, -100, 100, device)

tt_output_tensor_on_device = ttnn.assign_bw(grad_tensor, input_tensor, other_tensor)
print(tt_output_tensor_on_device)
print(grad_tensor)
print(input_tensor, other_tensor)

in_data.retain_grad()

pyt_y = torch.clone(in_data)
@@ -4,7 +4,6 @@

import torch
import pytest
import tt_lib
import ttnn
from tests.tt_eager.python_api_testing.unit_testing.backward_ops.utility_funcs import (
data_gen_with_range,
@@ -4,7 +4,6 @@

import torch
import pytest
import tt_lib
import ttnn
from tests.tt_eager.python_api_testing.unit_testing.backward_ops.utility_funcs import compare_pcc, data_gen_with_range

@@ -4,7 +4,6 @@

import torch
import pytest
import tt_lib
import ttnn
from tests.tt_eager.python_api_testing.unit_testing.backward_ops.utility_funcs import data_gen_with_range, compare_pcc

@@ -4,7 +4,6 @@

import torch
import pytest
import tt_lib
import ttnn
from tests.tt_eager.python_api_testing.unit_testing.backward_ops.utility_funcs import data_gen_with_range, compare_pcc

@@ -4,7 +4,6 @@

import torch
import pytest
import tt_lib
import ttnn
from tests.tt_eager.python_api_testing.unit_testing.backward_ops.utility_funcs import data_gen_with_range, compare_pcc

@@ -4,7 +4,6 @@

import torch
import pytest
import tt_lib
import ttnn
from tests.tt_eager.python_api_testing.unit_testing.backward_ops.utility_funcs import data_gen_with_range, compare_pcc

@@ -4,7 +4,6 @@

import torch
import pytest
import tt_lib
import ttnn
from tests.tt_eager.python_api_testing.unit_testing.backward_ops.utility_funcs import data_gen_with_range, compare_pcc

@@ -4,7 +4,6 @@

import torch
import pytest
import tt_lib
import ttnn
from tests.tt_eager.python_api_testing.unit_testing.backward_ops.utility_funcs import data_gen_with_range, compare_pcc

@@ -4,7 +4,6 @@

import torch
import pytest
import tt_lib
import ttnn
from tests.tt_eager.python_api_testing.unit_testing.backward_ops.utility_funcs import data_gen_with_range, compare_pcc

1 change: 1 addition & 0 deletions ttnn/CMakeLists.txt
@@ -23,6 +23,7 @@ set(TTNN_SRCS
${CMAKE_CURRENT_SOURCE_DIR}/cpp/ttnn/operations/upsample/device/upsample_op_multi_core.cpp
${CMAKE_CURRENT_SOURCE_DIR}/cpp/ttnn/operations/upsample/device/upsample_op_single_core.cpp
${CMAKE_CURRENT_SOURCE_DIR}/cpp/ttnn/operations/eltwise/unary/device/unary_op.cpp
${CMAKE_CURRENT_SOURCE_DIR}/cpp/ttnn/operations/eltwise/unary_backward/device/unary_backward_op.cpp
${CMAKE_CURRENT_SOURCE_DIR}/cpp/ttnn/operations/examples/example/device/example_device_operation.cpp
${CMAKE_CURRENT_SOURCE_DIR}/cpp/ttnn/operations/examples/example/device/single_core_program_factory.cpp
${CMAKE_CURRENT_SOURCE_DIR}/cpp/ttnn/operations/examples/example/device/multi_core_program_factory.cpp
@@ -13,12 +13,10 @@
#include "tt_metal/host_api.hpp"
#include "tt_metal/tools/profiler/op_profiler.hpp"
#include "ttnn/operations/eltwise/unary/unary.hpp"
#include "ttnn/operations/eltwise/binary/binary.hpp"

namespace ttnn::operations::unary_backward {

namespace utils {


std::vector<ttnn::Tensor> _mul_bw(
const Tensor& grad, const Tensor& input, float scalar, const MemoryConfig& output_mem_config) {
std::vector<Tensor> grad_tensor;
@@ -36,7 +34,6 @@ std::vector<Tensor> _clamp_min_bw(
return grad_tensor;
}


std::vector<Tensor> _clamp_bw(
const Tensor& grad, const Tensor& input, float min, float max, const MemoryConfig& output_mem_config) {
std::vector<Tensor> grad_tensor;
@@ -81,7 +78,7 @@ std::vector<Tensor> _add_bw(
return grad_tensor;
}

std::vector<Tensor> _unary_comp_bw(const Tensor& grad, const Tensor& input, float other, const MemoryConfig& output_mem_config) {
std::vector<Tensor> _unary_comp_bw(const Tensor& grad, const MemoryConfig& output_mem_config) {
std::vector<Tensor> grad_tensor;
Tensor zero_grad = tt::tt_metal::zeros_like(grad, output_mem_config);
grad_tensor.emplace_back(zero_grad);
@@ -90,10 +87,10 @@ std::vector<Tensor> _unary_comp_bw(const Tensor& grad, const Tensor& input, floa

std::vector<Tensor> _eq_bw(
const Tensor& grad, const Tensor& input, float other, const MemoryConfig& output_mem_config) {
return _unary_comp_bw(grad, input, other, output_mem_config);
return _unary_comp_bw(grad, output_mem_config);
}

std::function<std::vector<ttnn::Tensor>(const Tensor&, const Tensor&, const MemoryConfig&)> get_function_type1(UnaryBackwardOpType OpType){
std::function<std::vector<ttnn::Tensor>(const Tensor&, const Tensor&, const MemoryConfig&)> UnaryBackwardFunction::get_function_type1(UnaryBackwardOpType OpType){
switch (OpType) {
case UnaryBackwardOpType::ASSIGN_BW:
return _assign_bw;
@@ -105,7 +102,7 @@ std::function<std::vector<ttnn::Tensor>(const Tensor&, const Tensor&, const Memo
}
}

std::function<std::vector<ttnn::Tensor>(const Tensor&, const Tensor&, float, const MemoryConfig&)> get_function_type1_w_float(UnaryBackwardOpType OpType){
std::function<std::vector<ttnn::Tensor>(const Tensor&, const Tensor&, float, const MemoryConfig&)> UnaryBackwardFunction::get_function_type1_w_float(UnaryBackwardOpType OpType){
switch (OpType) {
case UnaryBackwardOpType::MUL_BW:
return _mul_bw;
@@ -121,7 +118,7 @@ std::function<std::vector<ttnn::Tensor>(const Tensor&, const Tensor&, float, con
}
}

std::function<std::vector<ttnn::Tensor>(const Tensor&, const Tensor&, float, float, const MemoryConfig&)> get_function_type1_w_two_float(UnaryBackwardOpType OpType){
std::function<std::vector<ttnn::Tensor>(const Tensor&, const Tensor&, float, float, const MemoryConfig&)> UnaryBackwardFunction::get_function_type1_w_two_float(UnaryBackwardOpType OpType){
switch (OpType) {
case UnaryBackwardOpType::CLAMP_BW:
return _clamp_bw;
@@ -131,6 +128,4 @@ std::function<std::vector<ttnn::Tensor>(const Tensor&, const Tensor&, float, flo
}
}

}

} // namespace ttnn::operations::unary
@@ -6,7 +6,7 @@

#include <functional>
#include <optional>

#include "tensor/tensor.hpp"
#include "third_party/magic_enum/magic_enum.hpp"

namespace ttnn::operations::unary_backward {
@@ -22,5 +22,10 @@ enum class UnaryBackwardOpType {
EQ_BW,
};

struct UnaryBackwardFunction{
static std::function<std::vector<ttnn::Tensor>(const Tensor&, const Tensor&, const MemoryConfig&)> get_function_type1(UnaryBackwardOpType OpType);
static std::function<std::vector<ttnn::Tensor>(const Tensor&, const Tensor&, float, const MemoryConfig&)> get_function_type1_w_float(UnaryBackwardOpType OpType);
static std::function<std::vector<ttnn::Tensor>(const Tensor&, const Tensor&, float, float, const MemoryConfig&)> get_function_type1_w_two_float(UnaryBackwardOpType OpType);
};

} // namespace ttnn::operations::unary
} // namespace ttnn::operations::unary_backward
@@ -5,7 +5,7 @@

#pragma once

#include "device/unary_backward_op.cpp"
#include "device/unary_backward_op.hpp"
#include "ttnn/device_operation.hpp"
#include "ttnn/operations/data_movement.hpp"

@@ -29,7 +29,7 @@ struct ExecuteUnaryBackward {
const Tensor &input_tensor_arg,
const std::optional<MemoryConfig> &memory_config = std::nullopt) {

auto op_type = utils::get_function_type1(unary_backward_op_type);
auto op_type = UnaryBackwardFunction::get_function_type1(unary_backward_op_type);
auto output_memory_config = memory_config.value_or(input_tensor_arg.memory_config());
return op_type(grad_tensor_arg, input_tensor_arg, output_memory_config);
}
@@ -42,7 +42,7 @@
float alpha,
const std::optional<MemoryConfig> &memory_config = std::nullopt) {

auto op_type = utils::get_function_type1_w_float(unary_backward_op_type);
auto op_type = UnaryBackwardFunction::get_function_type1_w_float(unary_backward_op_type);
auto output_memory_config = memory_config.value_or(input_tensor_arg.memory_config());
return op_type(grad_tensor_arg, input_tensor_arg, alpha, output_memory_config);
}
@@ -56,7 +56,7 @@
float b,
const std::optional<MemoryConfig> &memory_config = std::nullopt) {

auto op_type = utils::get_function_type1_w_two_float(unary_backward_op_type);
auto op_type = UnaryBackwardFunction::get_function_type1_w_two_float(unary_backward_op_type);
auto output_memory_config = memory_config.value_or(input_tensor_arg.memory_config());
return op_type(grad_tensor_arg, input_tensor_arg, a, b, output_memory_config);
}
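
The three ExecuteUnaryBackward overloads above (plain, with one float, with two floats) map onto the three UnaryBackwardFunction getters. A hedged sketch (not part of this commit) of how they surface in Python: ttnn.clamp_bw is an assumed binding name, assign_bw and mul_bw are bound in the py_module changes below, and the tensors come from the same test helper used above.

    import torch
    import ttnn
    from tests.tt_eager.python_api_testing.unit_testing.backward_ops.utility_funcs import data_gen_with_range

    device = ttnn.open_device(device_id=0)
    _, input_tensor = data_gen_with_range(torch.Size([1, 1, 32, 32]), -100, 100, device)
    _, grad_tensor = data_gen_with_range(torch.Size([1, 1, 32, 32]), -100, 100, device)

    # get_function_type1: grad and input only (e.g. ASSIGN_BW).
    assign_grads = ttnn.assign_bw(grad_tensor, input_tensor)

    # get_function_type1_w_float: grad, input and one scalar (e.g. MUL_BW).
    mul_grads = ttnn.mul_bw(grad_tensor, input_tensor, 2.0)

    # get_function_type1_w_two_float: grad, input and two scalars (e.g. CLAMP_BW; assumed name).
    clamp_grads = ttnn.clamp_bw(grad_tensor, input_tensor, -1.0, 1.0)

    ttnn.close_device(device)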
@@ -160,7 +160,7 @@ void py_module(py::module& module) {
detail::bind_unary_backward(
module,
ttnn::mul_bw,
R"doc(Performs backward operations for multiply on :attr:`input_tensor`, :attr:`alpha` with given :attr:`grad_tensor`.)doc");
R"doc(Performs backward operations for multiply on :attr:`input_tensor`, :attr:`alpha` or attr:`input_tensor_a`, attr:`input_tensor_b` with given :attr:`grad_tensor`.)doc");

detail::bind_unary_backward(
module,
@@ -175,7 +175,7 @@ void py_module(py::module& module) {
detail::bind_unary_backward(
module,
ttnn::assign_bw,
R"doc(Performs backward operations for assign on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc");
R"doc(Performs backward operations for assign on :attr:`input_tensor` or attr:`input_tensor_a`, attr:`input_tensor_b` with given :attr:`grad_tensor`.)doc");

detail::bind_unary_backward(
module,
@@ -187,12 +187,12 @@ void py_module(py::module& module) {
detail::bind_unary_backward(
module,
ttnn::add_bw,
R"doc(Performs backward operations for addition on :attr:`input_tensor`, :attr:`alpha` with given :attr:`grad_tensor`.)doc");
R"doc(Performs backward operations for addition on :attr:`input_tensor`, :attr:`alpha` or attr:`input_tensor_a`, attr:`input_tensor_b` with given :attr:`grad_tensor`.)doc");

detail::bind_unary_backward(
module,
ttnn::eq_bw,
R"doc(Performs backward operations for equal to comparison on :attr:`input_tensor`, :attr:`alpha` or attr:`input_tensor_b` with given :attr:`grad_tensor`.
R"doc(Performs backward operations for equal to comparison on :attr:`input_tensor`, :attr:`alpha` or attr:`input_tensor_a`, attr:`input_tensor_b` with given :attr:`grad_tensor`.
Returns an tensor of zeros like input tensors.)doc");

}
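
The eq_bw docstring above states that the op returns zeros like the input tensors. A short torch-only sketch (not part of this commit) of why that is the defined golden gradient:

    import torch

    grad = torch.randn(1, 1, 32, 32)

    # torch.eq produces a boolean tensor that does not participate in autograd,
    # so the backward of a comparison is defined as an all-zero gradient; the new
    # _unary_comp_bw/_eq_bw path likewise returns zeros_like(grad).
    golden_input_grad = torch.zeros_like(grad)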
