From 071badb6458183b46602a250a933c57ad49caf4f Mon Sep 17 00:00:00 2001
From: mouliraj-mcw
Date: Tue, 9 Jul 2024 18:01:33 +0000
Subject: [PATCH] #10071: Merge abs_bw to TTNN

---
 docs/source/ttnn/ttnn/api.rst                     |  1 +
 docs/source/ttnn/ttnn/dependencies/tt_lib.rst     |  2 --
 docs/source/ttnn/ttnn/ttnn/abs_bw.rst             |  6 ++++++
 .../backward_ops/test_backward_abs.py             |  4 ++--
 .../tt_dnn/op_library/backward/backward_ops.cpp   | 10 ----------
 .../tt_dnn/op_library/backward/backward_ops.hpp   |  5 -----
 .../csrc/tt_lib_bindings_tensor_backward_ops.cpp  | 16 ----------------
 .../unary_backward/device/unary_backward_op.cpp   |  9 +++++++++
 .../unary_backward/device/unary_backward_op.hpp   |  3 ++-
 .../eltwise/unary_backward/unary_backward.hpp     |  1 +
 .../unary_backward/unary_backward_pybind.hpp      |  5 +++++
 11 files changed, 26 insertions(+), 36 deletions(-)
 create mode 100644 docs/source/ttnn/ttnn/ttnn/abs_bw.rst

diff --git a/docs/source/ttnn/ttnn/api.rst b/docs/source/ttnn/ttnn/api.rst
index 52aace69db81..aee7196af846 100644
--- a/docs/source/ttnn/ttnn/api.rst
+++ b/docs/source/ttnn/ttnn/api.rst
@@ -177,6 +177,7 @@ Pointwise Unary
    ttnn/round_bw
    ttnn/log_bw
    ttnn/relu6_bw
+   ttnn/abs_bw
 
 Pointwise Binary
 ================
diff --git a/docs/source/ttnn/ttnn/dependencies/tt_lib.rst b/docs/source/ttnn/ttnn/dependencies/tt_lib.rst
index 714a04cf32e5..4eb96540a5a9 100644
--- a/docs/source/ttnn/ttnn/dependencies/tt_lib.rst
+++ b/docs/source/ttnn/ttnn/dependencies/tt_lib.rst
@@ -820,8 +820,6 @@ Backward Operations
 
 .. autofunction:: tt_lib.tensor.unary_sub_bw
 
-.. autofunction:: tt_lib.tensor.abs_bw
-
 .. autofunction:: tt_lib.tensor.complex_abs_bw
 
 .. autofunction:: tt_lib.tensor.rsqrt_bw
diff --git a/docs/source/ttnn/ttnn/ttnn/abs_bw.rst b/docs/source/ttnn/ttnn/ttnn/abs_bw.rst
new file mode 100644
index 000000000000..1f39180f8342
--- /dev/null
+++ b/docs/source/ttnn/ttnn/ttnn/abs_bw.rst
@@ -0,0 +1,6 @@
+.. _ttnn.abs_bw:
+
+ttnn.abs_bw
+############
+
+.. autofunction:: ttnn.abs_bw
diff --git a/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_abs.py b/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_abs.py
index 83e419f7c652..02b5481f7ec3 100644
--- a/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_abs.py
+++ b/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_abs.py
@@ -4,7 +4,7 @@
 
 import torch
 import pytest
-import tt_lib
+import ttnn
 from tests.tt_eager.python_api_testing.unit_testing.backward_ops.utility_funcs import data_gen_with_range, compare_pcc
 
 
@@ -22,7 +22,7 @@ def test_bw_abs(input_shapes, device):
 
     pyt_y = torch.abs(in_data)
 
-    tt_output_tensor_on_device = tt_lib.tensor.abs_bw(grad_tensor, input_tensor)
+    tt_output_tensor_on_device = ttnn.abs_bw(grad_tensor, input_tensor)
 
     in_data.retain_grad()
 
diff --git a/tt_eager/tt_dnn/op_library/backward/backward_ops.cpp b/tt_eager/tt_dnn/op_library/backward/backward_ops.cpp
index da75d1618333..17eca7dcbd1a 100644
--- a/tt_eager/tt_dnn/op_library/backward/backward_ops.cpp
+++ b/tt_eager/tt_dnn/op_library/backward/backward_ops.cpp
@@ -343,16 +343,6 @@ std::vector<Tensor> ne_bw(const Tensor& grad, const MemoryConfig& output_mem_con
     return operation::decorate_as_composite(__func__, _ne_bw)(grad, output_mem_config);
 }
 
-std::vector<Tensor> _abs_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
-    std::vector<Tensor> grad_tensor;
-    Tensor result = ttnn::multiply(grad, sign(input, output_mem_config), std::nullopt, output_mem_config);
-    grad_tensor.emplace_back(result);
-    return grad_tensor;
-}
-std::vector<Tensor> abs_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
-    return operation::decorate_as_composite(__func__, _abs_bw)(grad, input, output_mem_config);
-}
-
 std::vector<Tensor> _rsqrt_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
     std::vector<Tensor> grad_tensor;
     Tensor rsqrt_result = power(rsqrt(input, true, output_mem_config), 3, output_mem_config);
diff --git a/tt_eager/tt_dnn/op_library/backward/backward_ops.hpp b/tt_eager/tt_dnn/op_library/backward/backward_ops.hpp
index 780c9ee3dcd9..eee56198e927 100644
--- a/tt_eager/tt_dnn/op_library/backward/backward_ops.hpp
+++ b/tt_eager/tt_dnn/op_library/backward/backward_ops.hpp
@@ -125,11 +125,6 @@ std::vector<Tensor> binary_le_bw(
     const Tensor& input,
     const MemoryConfig& output_mem_config = operation::DEFAULT_OUTPUT_MEMORY_CONFIG);
-std::vector<Tensor> abs_bw(
-    const Tensor& grad,
-    const Tensor& input,
-    const MemoryConfig& output_mem_config = operation::DEFAULT_OUTPUT_MEMORY_CONFIG);
-
 std::vector<Tensor> complex_abs_bw(
     const Tensor& grad,
     const Tensor& input,
     const MemoryConfig& output_mem_config = operation::DEFAULT_OUTPUT_MEMORY_CONFIG);
diff --git a/tt_eager/tt_lib/csrc/tt_lib_bindings_tensor_backward_ops.cpp b/tt_eager/tt_lib/csrc/tt_lib_bindings_tensor_backward_ops.cpp
index 4bfdae5fe717..a3ad5d741d2a 100644
--- a/tt_eager/tt_lib/csrc/tt_lib_bindings_tensor_backward_ops.cpp
+++ b/tt_eager/tt_lib/csrc/tt_lib_bindings_tensor_backward_ops.cpp
@@ -425,22 +425,6 @@ namespace tt::tt_metal::detail{
             "output_mem_config", "Layout of tensor in TT Accelerator device memory banks", "MemoryConfig", "Default is interleaved in DRAM", "No"
     )doc");
 
-    m_tensor.def("abs_bw", &tt::tt_metal::abs_bw,
-        py::arg("grad").noconvert(), py::arg("input").noconvert(), py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
-        Performs backward operations for abs of ``input`` tensors with given ``grad``.
-
-        Input tensors must have BFLOAT16 data type.
-
-        Output tensor will have BFLOAT16 data type.
-
-        .. csv-table::
-            :header: "Argument", "Description", "Data type", "Valid range", "Required"
-
-            "grad", "Gradient tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
-            "input", "Tensor add is applied to", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
-            "output_mem_config", "Layout of tensor in TT Accelerator device memory banks", "MemoryConfig", "Default is interleaved in DRAM", "No"
-    )doc");
-
     m_tensor.def("complex_abs_bw", py::overload_cast<const Tensor&, const Tensor&, const MemoryConfig&>(&complex_abs_bw),
         py::arg("grad").noconvert(), py::arg("input").noconvert(), py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
         Performs backward operations for abs of complex ``input`` tensor with given ``grad``.
diff --git a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/device/unary_backward_op.cpp b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/device/unary_backward_op.cpp
index dda9428bf0ea..c8644f0af588 100644
--- a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/device/unary_backward_op.cpp
+++ b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/device/unary_backward_op.cpp
@@ -147,6 +147,13 @@ std::vector<Tensor> _relu6_bw(const Tensor& grad, const Tensor& input, const Mem
     return grad_tensor;
 }
 
+std::vector<Tensor> _abs_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
+    std::vector<Tensor> grad_tensor;
+    Tensor result = ttnn::multiply(grad, ttnn::sign(input, output_mem_config), std::nullopt, output_mem_config);
+    grad_tensor.emplace_back(result);
+    return grad_tensor;
+}
+
 std::function<std::vector<Tensor>(const Tensor&, const Tensor&, const MemoryConfig&)> get_function_type1(UnaryBackwardOpType OpType){
 >>>>>>> #9874: Merge round_bw to TTNN
     switch (OpType) {
@@ -162,6 +169,8 @@ std::function<std::vector<Tensor>(const Tensor&, const Tensor&, const Memo
             return _log_bw;
         case UnaryBackwardOpType::RELU6_BW:
             return _relu6_bw;
+        case UnaryBackwardOpType::ABS_BW:
+            return _abs_bw;
         default:
             TT_ASSERT(false && "Undefined op type");
             return 0;
diff --git a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/device/unary_backward_op.hpp b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/device/unary_backward_op.hpp
index 653e40292371..5e58ed313bcf 100644
--- a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/device/unary_backward_op.hpp
+++ b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/device/unary_backward_op.hpp
@@ -23,7 +23,8 @@ enum class UnaryBackwardOpType {
     FLOOR_BW,
     ROUND_BW,
     LOG_BW,
-    RELU6_BW
+    RELU6_BW,
+    ABS_BW,
 };
 
 struct UnaryBackwardFunction{
diff --git a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward.hpp b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward.hpp
index c908e1a99db9..5a716a2b4e2f 100644
--- a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward.hpp
+++ b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward.hpp
@@ -76,6 +76,7 @@ constexpr auto floor_bw = ttnn::register_operation<operations::unary_backward::
 constexpr auto round_bw = ttnn::register_operation<operations::unary_backward::ExecuteUnaryBackward<operations::unary_backward::UnaryBackwardOpType::ROUND_BW>>("ttnn::round_bw");
 constexpr auto log_bw = ttnn::register_operation<operations::unary_backward::ExecuteUnaryBackward<operations::unary_backward::UnaryBackwardOpType::LOG_BW>>("ttnn::log_bw");
 constexpr auto relu6_bw = ttnn::register_operation<operations::unary_backward::ExecuteUnaryBackward<operations::unary_backward::UnaryBackwardOpType::RELU6_BW>>("ttnn::relu6_bw");
+constexpr auto abs_bw = ttnn::register_operation<operations::unary_backward::ExecuteUnaryBackward<operations::unary_backward::UnaryBackwardOpType::ABS_BW>>("ttnn::abs_bw");
 
 }  // namespace ttnn
diff --git a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward_pybind.hpp b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward_pybind.hpp
index c4fc1e216464..962b5cc10156 100644
--- a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward_pybind.hpp
+++ b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward_pybind.hpp
@@ -215,6 +215,11 @@ void py_module(py::module& module) {
         ttnn::relu6_bw,
         R"doc(Performs backward operations for relu6 on :attr:`input_tensor` with given :attr:`grad_tensor`)doc");
 
+    detail::bind_unary_backward(
+        module,
+        ttnn::abs_bw,
+        R"doc(Performs backward operations for abs on :attr:`input_tensor` with given :attr:`grad_tensor`)doc");
+
 }
 
 }  // namespace binary_backward
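
For reference, the sketch below shows how the migrated op is expected to be exercised once this patch lands. It mirrors the updated test above, but the host-side setup (ttnn.open_device, ttnn.from_torch, ttnn.to_torch), the device id, shapes, and tolerance are assumptions for illustration, not part of this patch.

    # Hedged usage sketch for ttnn.abs_bw (abs backward: grad * sign(input)).
    # Assumes standard ttnn host APIs; device id, shapes, and tolerance are illustrative.
    import torch
    import ttnn

    device = ttnn.open_device(device_id=0)

    torch_input = torch.randn(1, 1, 32, 32, dtype=torch.bfloat16, requires_grad=True)
    torch_grad = torch.randn(1, 1, 32, 32, dtype=torch.bfloat16)

    input_tensor = ttnn.from_torch(torch_input, layout=ttnn.TILE_LAYOUT, device=device)
    grad_tensor = ttnn.from_torch(torch_grad, layout=ttnn.TILE_LAYOUT, device=device)

    # The op returns a vector of gradient tensors (matching _abs_bw above); take the first.
    tt_input_grad = ttnn.abs_bw(grad_tensor, input_tensor)[0]

    # PyTorch reference: d|x|/dx = sign(x), so the expected gradient is grad * sign(input).
    torch.abs(torch_input).backward(gradient=torch_grad)
    expected = torch_input.grad

    print(torch.allclose(ttnn.to_torch(tt_input_grad).float(), expected.float(), atol=1e-2))

    ttnn.close_device(device)

The [0] index reflects that these backward ops return a vector of gradients, one per differentiable input; abs has a single input, so a single tensor comes back.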