From 40c5a1f8cbcf9ab09e05753e6a277cc75ab16f2d Mon Sep 17 00:00:00 2001
From: mouliraj-mcw
Date: Tue, 9 Jul 2024 09:49:12 +0000
Subject: [PATCH] #9874: Merge log_bw to TTNN

---
 docs/source/ttnn/ttnn/api.rst                 |  1 +
 docs/source/ttnn/ttnn/dependencies/tt_lib.rst |  2 --
 docs/source/ttnn/ttnn/ttnn/floor_bw.rst       |  6 ++++++
 docs/source/ttnn/ttnn/ttnn/log_bw.rst         |  6 ++++++
 docs/source/ttnn/ttnn/ttnn/round_bw.rst       |  6 ++++++
 .../backward_ops/test_backward_floor.py       |  1 -
 .../backward_ops/test_backward_log.py         |  6 +++---
 .../backward_ops/test_backward_round.py       |  1 -
 .../op_library/backward/backward_ops.cpp      | 20 -------------------
 .../op_library/backward/backward_ops.hpp      |  5 -----
 .../tt_lib_bindings_tensor_backward_ops.cpp   | 16 ---------------
 .../device/unary_backward_op.cpp              | 20 +++++++++++++++++++
 .../device/unary_backward_op.hpp              |  1 +
 .../eltwise/unary_backward/unary_backward.hpp |  1 +
 .../unary_backward/unary_backward_pybind.hpp  |  9 +++++++--
 15 files changed, 51 insertions(+), 50 deletions(-)
 create mode 100644 docs/source/ttnn/ttnn/ttnn/floor_bw.rst
 create mode 100644 docs/source/ttnn/ttnn/ttnn/log_bw.rst
 create mode 100644 docs/source/ttnn/ttnn/ttnn/round_bw.rst

diff --git a/docs/source/ttnn/ttnn/api.rst b/docs/source/ttnn/ttnn/api.rst
index 658b38b02d8f..168d3d4e253f 100644
--- a/docs/source/ttnn/ttnn/api.rst
+++ b/docs/source/ttnn/ttnn/api.rst
@@ -175,6 +175,7 @@ Pointwise Unary
    ttnn/eq_bw
    ttnn/floor_bw
    ttnn/round_bw
+   ttnn/log_bw
 
 Pointwise Binary
 ================
diff --git a/docs/source/ttnn/ttnn/dependencies/tt_lib.rst b/docs/source/ttnn/ttnn/dependencies/tt_lib.rst
index 818b31171c0c..8f69e36339d7 100644
--- a/docs/source/ttnn/ttnn/dependencies/tt_lib.rst
+++ b/docs/source/ttnn/ttnn/dependencies/tt_lib.rst
@@ -843,8 +843,6 @@ Backward Operations
 
 .. autofunction:: tt_lib.tensor.unary_sub_bw
 
-.. autofunction:: tt_lib.tensor.log_bw
-
 .. autofunction:: tt_lib.tensor.abs_bw
 
 .. autofunction:: tt_lib.tensor.complex_abs_bw
diff --git a/docs/source/ttnn/ttnn/ttnn/floor_bw.rst b/docs/source/ttnn/ttnn/ttnn/floor_bw.rst
new file mode 100644
index 000000000000..54e31216ba60
--- /dev/null
+++ b/docs/source/ttnn/ttnn/ttnn/floor_bw.rst
@@ -0,0 +1,6 @@
+.. _ttnn.floor_bw:
+
+ttnn.floor_bw
+##############
+
+.. autofunction:: ttnn.floor_bw
diff --git a/docs/source/ttnn/ttnn/ttnn/log_bw.rst b/docs/source/ttnn/ttnn/ttnn/log_bw.rst
new file mode 100644
index 000000000000..027646a942e9
--- /dev/null
+++ b/docs/source/ttnn/ttnn/ttnn/log_bw.rst
@@ -0,0 +1,6 @@
+.. _ttnn.log_bw:
+
+ttnn.log_bw
+############
+
+.. autofunction:: ttnn.log_bw
diff --git a/docs/source/ttnn/ttnn/ttnn/round_bw.rst b/docs/source/ttnn/ttnn/ttnn/round_bw.rst
new file mode 100644
index 000000000000..8dbf21e706af
--- /dev/null
+++ b/docs/source/ttnn/ttnn/ttnn/round_bw.rst
@@ -0,0 +1,6 @@
+.. _ttnn.round_bw:
+
+ttnn.round_bw
+##############
+
+.. autofunction:: ttnn.round_bw
diff --git a/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_floor.py b/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_floor.py
index 8d71f8228653..c9efe6c5e86d 100644
--- a/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_floor.py
+++ b/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_floor.py
@@ -4,7 +4,6 @@
 
 import torch
 import pytest
-import tt_lib
 import ttnn
 from tests.tt_eager.python_api_testing.unit_testing.backward_ops.utility_funcs import compare_pcc, data_gen_with_range
 
diff --git a/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_log.py b/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_log.py
index c404ff025c49..3418ea67dfae 100644
--- a/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_log.py
+++ b/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_log.py
@@ -4,7 +4,7 @@
 
 import torch
 import pytest
-import tt_lib
+import ttnn
 from tests.tt_eager.python_api_testing.unit_testing.backward_ops.utility_funcs import (
     data_gen_with_val,
     compare_pcc,
@@ -23,7 +23,7 @@ def test_bw_log_0(input_shapes, device):
     in_data, input_tensor = data_gen_with_val(input_shapes, device, True, val=0)
     grad_data, grad_tensor = data_gen_with_range(input_shapes, -1, 1, device)
 
-    tt_output_tensor_on_device = tt_lib.tensor.log_bw(grad_tensor, input_tensor)
+    tt_output_tensor_on_device = ttnn.log_bw(grad_tensor, input_tensor)
 
     in_data.retain_grad()
 
@@ -47,7 +47,7 @@ def test_bw_log(input_shapes, device):
     in_data, input_tensor = data_gen_with_range(input_shapes, -100, 100, device, True)
     grad_data, grad_tensor = data_gen_with_range(input_shapes, -100, 100, device)
 
-    tt_output_tensor_on_device = tt_lib.tensor.log_bw(grad_tensor, input_tensor)
+    tt_output_tensor_on_device = ttnn.log_bw(grad_tensor, input_tensor)
 
     in_data.retain_grad()
 
diff --git a/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_round.py b/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_round.py
index 3d5f91223599..baabffc00095 100644
--- a/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_round.py
+++ b/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_round.py
@@ -4,7 +4,6 @@
 
 import torch
 import pytest
-import tt_lib
 import ttnn
 from tests.tt_eager.python_api_testing.unit_testing.backward_ops.utility_funcs import compare_pcc, data_gen_with_range
 
diff --git a/tt_eager/tt_dnn/op_library/backward/backward_ops.cpp b/tt_eager/tt_dnn/op_library/backward/backward_ops.cpp
index 94070e7e6298..7d92284885d6 100644
--- a/tt_eager/tt_dnn/op_library/backward/backward_ops.cpp
+++ b/tt_eager/tt_dnn/op_library/backward/backward_ops.cpp
@@ -343,26 +343,6 @@ std::vector<Tensor> ne_bw(const Tensor& grad, const MemoryConfig& output_mem_con
     return operation::decorate_as_composite(__func__, _ne_bw)(grad, output_mem_config);
 }
 
-std::vector<Tensor> _log_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
-    std::vector<Tensor> grad_tensor;
-    Tensor grad_a = ttnn::multiply(grad, recip(input, output_mem_config), std::nullopt, output_mem_config);
-    Tensor t_inf = full_like(input, std::numeric_limits<float>::infinity(), output_mem_config);
-    Tensor t_nan = full_like(input, std::nanf(""), output_mem_config);
-    grad_tensor.emplace_back(where(
-        eqz(input, output_mem_config),
-        where(
-            eqz(grad, output_mem_config),
-            t_nan,
-            ttnn::multiply(t_inf, sign(grad, output_mem_config), std::nullopt, output_mem_config),
-            output_mem_config),
-        grad_a,
-        output_mem_config));
-    return grad_tensor;
-}
-std::vector<Tensor> log_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
-    return operation::decorate_as_composite(__func__, _log_bw)(grad, input, output_mem_config);
-}
-
 std::vector<Tensor> _abs_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
     std::vector<Tensor> grad_tensor;
     Tensor result = ttnn::multiply(grad, sign(input, output_mem_config), std::nullopt, output_mem_config);
diff --git a/tt_eager/tt_dnn/op_library/backward/backward_ops.hpp b/tt_eager/tt_dnn/op_library/backward/backward_ops.hpp
index f7ac39b238ae..54a6a50b924c 100644
--- a/tt_eager/tt_dnn/op_library/backward/backward_ops.hpp
+++ b/tt_eager/tt_dnn/op_library/backward/backward_ops.hpp
@@ -120,11 +120,6 @@ std::vector<Tensor> unary_sub_bw(
     const Tensor& input,
     const MemoryConfig& output_mem_config = operation::DEFAULT_OUTPUT_MEMORY_CONFIG);
 
-std::vector<Tensor> log_bw(
-    const Tensor& grad,
-    const Tensor& input,
-    const MemoryConfig& output_mem_config = operation::DEFAULT_OUTPUT_MEMORY_CONFIG);
-
 std::vector<Tensor> binary_le_bw(
     const Tensor& grad,
     const Tensor& input,
diff --git a/tt_eager/tt_lib/csrc/tt_lib_bindings_tensor_backward_ops.cpp b/tt_eager/tt_lib/csrc/tt_lib_bindings_tensor_backward_ops.cpp
index 9289ca1dae45..cf087a416fb5 100644
--- a/tt_eager/tt_lib/csrc/tt_lib_bindings_tensor_backward_ops.cpp
+++ b/tt_eager/tt_lib/csrc/tt_lib_bindings_tensor_backward_ops.cpp
@@ -425,22 +425,6 @@ namespace tt::tt_metal::detail{
             "output_mem_config", "Layout of tensor in TT Accelerator device memory banks", "MemoryConfig", "Default is interleaved in DRAM", "No"
         )doc");
 
-    m_tensor.def("log_bw", &tt::tt_metal::log_bw,
-            py::arg("grad").noconvert(), py::arg("input").noconvert(), py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
-        Performs backward operations for logarithm of ``input`` tensors with given ``grad``.
-
-        Input tensors must have BFLOAT16 data type.
-
-        Output tensors will have BFLOAT16 data type.
-
-        .. csv-table::
-            :header: "Argument", "Description", "Data type", "Valid range", "Required"
-
-            "grad", "Gradient tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
-            "input", "Tensor add is applied to", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
-            "output_mem_config", "Layout of tensor in TT Accelerator device memory banks", "MemoryConfig", "Default is interleaved in DRAM", "No"
-        )doc");
-
     m_tensor.def("abs_bw", &tt::tt_metal::abs_bw,
             py::arg("grad").noconvert(), py::arg("input").noconvert(), py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
         Performs backward operations for abs of ``input`` tensors with given ``grad``.
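
Note (reference sketch, not part of the diff): the _log_bw body removed above, and re-added under ttnn in the next file, applies the chain rule grad_in = grad / input (since d/dx log(x) = 1/x) and then patches the input == 0 positions: NaN where grad is also 0, signed infinity otherwise. A minimal torch sketch of the same semantics for host-side comparison; the helper name log_bw_reference is illustrative, not part of either library.

import torch

def log_bw_reference(grad: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
    # Plain chain rule everywhere x != 0: d/dx log(x) = 1/x.
    grad_a = grad / x
    # Mirror the where/eqz chain in _log_bw at x == 0:
    # grad == 0 -> NaN, otherwise +/-inf following the sign of grad.
    t_inf = torch.full_like(x, float("inf"))
    t_nan = torch.full_like(x, float("nan"))
    at_zero = torch.where(grad == 0, t_nan, t_inf * torch.sign(grad))
    return torch.where(x == 0, at_zero, grad_a)
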
diff --git a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/device/unary_backward_op.cpp b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/device/unary_backward_op.cpp
index 08a32c19b220..d5b042a4cb78 100644
--- a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/device/unary_backward_op.cpp
+++ b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/device/unary_backward_op.cpp
@@ -99,6 +99,7 @@ std::vector<Tensor> _floor_bw(const Tensor& grad, const Tensor& input, const Mem
     grad_tensor.emplace_back(t_zero);
     return grad_tensor;
 }
+
 std::vector<Tensor> _round_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
     std::vector<Tensor> grad_tensor;
     Tensor t_zero = tt::tt_metal::zeros_like(grad, output_mem_config);
@@ -106,6 +107,23 @@ std::vector<Tensor> _round_bw(const Tensor& grad, const Tensor& input, const Mem
     grad_tensor.emplace_back(t_zero);
     return grad_tensor;
 }
+
+std::vector<Tensor> _log_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
+    std::vector<Tensor> grad_tensor;
+    Tensor grad_a = ttnn::multiply(grad, ttnn::reciprocal(input, output_mem_config), std::nullopt, output_mem_config);
+    Tensor t_inf = tt::tt_metal::full_like(input, std::numeric_limits<float>::infinity(), output_mem_config);
+    Tensor t_nan = tt::tt_metal::full_like(input, std::nanf(""), output_mem_config);
+    grad_tensor.emplace_back(where(
+        ttnn::eqz(input, output_mem_config),
+        where(
+            ttnn::eqz(grad, output_mem_config),
+            t_nan,
+            ttnn::multiply(t_inf, ttnn::sign(grad, output_mem_config), std::nullopt, output_mem_config),
+            output_mem_config),
+        grad_a,
+        output_mem_config));
+    return grad_tensor;
+}
+
 std::function<std::vector<Tensor>(const Tensor&, const Tensor&, const MemoryConfig&)> get_function_type1(UnaryBackwardOpType OpType){
     switch (OpType) {
         case UnaryBackwardOpType::ASSIGN_BW:
@@ -116,6 +134,8 @@ std::function<std::vector<Tensor>(const Tensor&, const Tensor&, const Memo
             return _floor_bw;
         case UnaryBackwardOpType::ROUND_BW:
             return _round_bw;
+        case UnaryBackwardOpType::LOG_BW:
+            return _log_bw;
         default:
             TT_ASSERT(false && "Undefined op type");
             return 0;
diff --git a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/device/unary_backward_op.hpp b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/device/unary_backward_op.hpp
index 79a460eaa562..9ed0a32f2d9d 100644
--- a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/device/unary_backward_op.hpp
+++ b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/device/unary_backward_op.hpp
@@ -22,6 +22,7 @@ enum class UnaryBackwardOpType {
     EQ_BW,
     FLOOR_BW,
     ROUND_BW,
+    LOG_BW,
 };
 
 
diff --git a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward.hpp b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward.hpp
index 10105d1f9019..411187bf03bf 100644
--- a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward.hpp
+++ b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward.hpp
@@ -74,6 +74,7 @@ constexpr auto add_bw = ttnn::register_operation<operations::unary_backward::Ex
 constexpr auto eq_bw = ttnn::register_operation<operations::unary_backward::ExecuteUnaryBackward<operations::unary_backward::UnaryBackwardOpType::EQ_BW>>("ttnn::eq_bw");
 constexpr auto floor_bw = ttnn::register_operation<operations::unary_backward::ExecuteUnaryBackward<operations::unary_backward::UnaryBackwardOpType::FLOOR_BW>>("ttnn::floor_bw");
 constexpr auto round_bw = ttnn::register_operation<operations::unary_backward::ExecuteUnaryBackward<operations::unary_backward::UnaryBackwardOpType::ROUND_BW>>("ttnn::round_bw");
+constexpr auto log_bw = ttnn::register_operation<operations::unary_backward::ExecuteUnaryBackward<operations::unary_backward::UnaryBackwardOpType::LOG_BW>>("ttnn::log_bw");
 
 } // namespace ttnn
 
diff --git a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward_pybind.hpp b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward_pybind.hpp
index 1a0e8a5d3cab..4e42a5da619d 100644
--- a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward_pybind.hpp
+++ b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward_pybind.hpp
@@ -198,12 +198,17 @@ void py_module(py::module& module) {
     detail::bind_unary_backward(
         module,
         ttnn::floor_bw,
-        R"doc(Performs backward operations for floor on :attr:`input_tensor` or attr:`input_tensor_a`, attr:`input_tensor_b` with given :attr:`grad_tensor`.)doc");
+        R"doc(Performs backward operations for floor on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc");
 
     detail::bind_unary_backward(
         module,
         ttnn::round_bw,
-        R"doc(Performs backward operations for round on :attr:`input_tensor` or attr:`input_tensor_a`, attr:`input_tensor_b` with given :attr:`grad_tensor`.)doc");
+        R"doc(Performs backward operations for round on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc");
+
+    detail::bind_unary_backward(
+        module,
+        ttnn::log_bw,
+        R"doc(Performs backward operations for logarithm on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc");
 
 }
 
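
For completeness, here is how the new entry point is exercised once the patch is applied, mirroring the updated tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_log.py but without the repo's utility helpers. A minimal standalone sketch: the 1x1x32x32 shape, the random seed, device id 0, and indexing the returned gradient list with [0] are illustrative assumptions.

import torch
import ttnn

device = ttnn.open_device(device_id=0)

torch.manual_seed(0)
shape = (1, 1, 32, 32)
# Inputs roughly matching data_gen_with_range(input_shapes, -100, 100, device, True).
in_data = torch.rand(shape).bfloat16() * 200 - 100
grad_data = torch.rand(shape).bfloat16() * 200 - 100

input_tensor = ttnn.from_torch(in_data, layout=ttnn.TILE_LAYOUT, device=device)
grad_tensor = ttnn.from_torch(grad_data, layout=ttnn.TILE_LAYOUT, device=device)

# New TTNN call path; previously this was tt_lib.tensor.log_bw(grad_tensor, input_tensor).
tt_output = ttnn.log_bw(grad_tensor, input_tensor)

# Golden comparison via torch autograd, as in the unit test: grad_in = grad / x.
in_data.requires_grad_(True)
torch.log(in_data).backward(gradient=grad_data)

print(ttnn.to_torch(tt_output[0]))
print(in_data.grad)

ttnn.close_device(device)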