From b7db8c5afd9fb452be082c6ccc197225f08c292d Mon Sep 17 00:00:00 2001
From: mouliraj-mcw
Date: Tue, 9 Jul 2024 06:49:24 +0000
Subject: [PATCH] #10071: Merge floor_bw to TTNN

---
 docs/source/ttnn/ttnn/api.rst                      |  1 +
 docs/source/ttnn/ttnn/dependencies/tt_lib.rst      |  2 --
 .../backward_ops/test_backward_floor.py            |  3 ++-
 .../tt_dnn/op_library/backward/backward_ops.cpp    | 10 ----------
 .../tt_dnn/op_library/backward/backward_ops.hpp    |  3 ---
 .../tt_lib_bindings_tensor_backward_ops.cpp        | 17 -----------------
 .../unary_backward/device/unary_backward_op.cpp    |  2 ++
 .../eltwise/unary_backward/unary_backward.hpp      |  1 +
 .../unary_backward/unary_backward_pybind.hpp       |  5 +++++
 9 files changed, 11 insertions(+), 33 deletions(-)

diff --git a/docs/source/ttnn/ttnn/api.rst b/docs/source/ttnn/ttnn/api.rst
index eb913f593a3..7291df8e70c 100644
--- a/docs/source/ttnn/ttnn/api.rst
+++ b/docs/source/ttnn/ttnn/api.rst
@@ -193,6 +193,7 @@ Pointwise Unary
    ttnn/elu_bw
    ttnn/celu_bw
    ttnn/rpow_bw
+   ttnn/floor_bw
 
 Pointwise Binary
 ================
diff --git a/docs/source/ttnn/ttnn/dependencies/tt_lib.rst b/docs/source/ttnn/ttnn/dependencies/tt_lib.rst
index b4b942d6133..54ab9221b0f 100644
--- a/docs/source/ttnn/ttnn/dependencies/tt_lib.rst
+++ b/docs/source/ttnn/ttnn/dependencies/tt_lib.rst
@@ -928,8 +928,6 @@ Backward Operations
 
 .. autofunction:: tt_lib.tensor.repeat_bw
 
-.. autofunction:: tt_lib.tensor.floor_bw
-
 .. autofunction:: tt_lib.tensor.round_bw
 
 .. autofunction:: tt_lib.tensor.unary_div_no_nan_bw
diff --git a/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_floor.py b/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_floor.py
index b9fd8c12135..8d71f822865 100644
--- a/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_floor.py
+++ b/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_floor.py
@@ -5,6 +5,7 @@
 import torch
 import pytest
 import tt_lib
+import ttnn
 from tests.tt_eager.python_api_testing.unit_testing.backward_ops.utility_funcs import compare_pcc, data_gen_with_range
 
 
@@ -22,7 +23,7 @@ def test_bw_floor(input_shapes, device):
 
     pyt_y = torch.floor(in_data)
 
-    tt_output_tensor_on_device = tt_lib.tensor.floor_bw(grad_tensor)
+    tt_output_tensor_on_device = ttnn.floor_bw(grad_tensor, input_tensor)
 
     in_data.retain_grad()
 
diff --git a/tt_eager/tt_dnn/op_library/backward/backward_ops.cpp b/tt_eager/tt_dnn/op_library/backward/backward_ops.cpp
index a60a9883150..329761eb192 100644
--- a/tt_eager/tt_dnn/op_library/backward/backward_ops.cpp
+++ b/tt_eager/tt_dnn/op_library/backward/backward_ops.cpp
@@ -1748,16 +1748,6 @@ std::vector<Tensor> repeat_bw(
     return operation::decorate_as_composite(__func__, _repeat_bw)(grad, input, shape, output_mem_config);
 }
 
-std::vector<Tensor> _floor_bw(const Tensor& grad, const MemoryConfig& output_mem_config) {
-    std::vector<Tensor> grad_tensor;
-    Tensor t_zero = zeros_like(grad, output_mem_config);
-    grad_tensor.emplace_back(t_zero);
-    return grad_tensor;
-}
-std::vector<Tensor> floor_bw(const Tensor& grad, const MemoryConfig& output_mem_config) {
-    return operation::decorate_as_composite(__func__, _floor_bw)(grad, output_mem_config);
-}
-
 std::vector<Tensor> _round_bw(const Tensor& grad, const MemoryConfig& output_mem_config) {
     std::vector<Tensor> grad_tensor;
     Tensor t_zero = zeros_like(grad, output_mem_config);
diff --git a/tt_eager/tt_dnn/op_library/backward/backward_ops.hpp b/tt_eager/tt_dnn/op_library/backward/backward_ops.hpp
index 18a5e4b99d8..d8c69403085 100644
--- a/tt_eager/tt_dnn/op_library/backward/backward_ops.hpp
+++ b/tt_eager/tt_dnn/op_library/backward/backward_ops.hpp
@@ -422,9 +422,6 @@ std::vector<Tensor> complex_sub_bw(
 std::vector<Tensor> repeat_bw(
     const Tensor& grad, const Tensor& input, const Shape& shape, const MemoryConfig& output_mem_config);
 
-std::vector<Tensor> floor_bw(
-    const Tensor& grad, const MemoryConfig& output_mem_config = operation::DEFAULT_OUTPUT_MEMORY_CONFIG);
-
 std::vector<Tensor> round_bw(
     const Tensor& grad, const MemoryConfig& output_mem_config = operation::DEFAULT_OUTPUT_MEMORY_CONFIG);
 
diff --git a/tt_eager/tt_lib/csrc/tt_lib_bindings_tensor_backward_ops.cpp b/tt_eager/tt_lib/csrc/tt_lib_bindings_tensor_backward_ops.cpp
index cffa0bde7d6..a4cbf453f9d 100644
--- a/tt_eager/tt_lib/csrc/tt_lib_bindings_tensor_backward_ops.cpp
+++ b/tt_eager/tt_lib/csrc/tt_lib_bindings_tensor_backward_ops.cpp
@@ -1153,23 +1153,6 @@ namespace tt::tt_metal::detail{
             "output_mem_config", "Layout of tensor in TT Accelerator device memory banks", "MemoryConfig", "Default is interleaved in DRAM", "No"
     )doc");
 
-
-    m_tensor.def("floor_bw", &tt::tt_metal::floor_bw,
-            py::arg("grad").noconvert(), py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
-        Returns an tensor of zeros like ``grad`` tensor
-
-        Input tensor must have BFLOAT16 data type.
-
-        Output tensor will have BFLOAT16 data type.
-
-        .. csv-table::
-            :header: "Argument", "Description", "Data type", "Valid range", "Required"
-
-            "grad", "Gradient tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
-            "output_mem_config", "Layout of tensor in TT Accelerator device memory banks", "MemoryConfig", "Default is interleaved in DRAM", "No"
-    )doc");
-
-
     m_tensor.def("round_bw", &tt::tt_metal::round_bw,
             py::arg("grad").noconvert(), py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
         Returns an tensor of zeros like ``grad`` tensor
diff --git a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/device/unary_backward_op.cpp b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/device/unary_backward_op.cpp
index 775252685fa..89fce03079e 100644
--- a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/device/unary_backward_op.cpp
+++ b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/device/unary_backward_op.cpp
@@ -388,6 +388,8 @@ std::function<std::vector<Tensor>(const Tensor&, const Tensor&, const MemoryConfig&)>
             return _relu_bw;
         case UnaryBackwardOpType::LOGIT_BW:
             return _logit_bw;
+        case UnaryBackwardOpType::FLOOR_BW:
+            return _floor_bw;
         default:
             TT_ASSERT(false && "Undefined op type");
             return 0;
diff --git a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward.hpp b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward.hpp
index c59943916f8..ca74b38c485 100644
--- a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward.hpp
+++ b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward.hpp
@@ -92,6 +92,7 @@ constexpr auto leaky_relu_bw = ttnn::register_operation<...>("ttnn::leaky_relu_bw");
 constexpr auto elu_bw = ttnn::register_operation<...>("ttnn::elu_bw");
 constexpr auto celu_bw = ttnn::register_operation<...>("ttnn::celu_bw");
 constexpr auto rpow_bw = ttnn::register_operation<...>("ttnn::rpow_bw");
+constexpr auto floor_bw = ttnn::register_operation<...>("ttnn::floor_bw");
 
 }  // namespace ttnn
 
diff --git a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward_pybind.hpp b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward_pybind.hpp
index f1a2e12c1a7..9f7caca571d 100644
--- a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward_pybind.hpp
+++ b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward_pybind.hpp
@@ -262,6 +262,11 @@ void py_module(py::module& module) {
         module,
         ttnn::logit_bw,
         R"doc(Performs backward operations for logit on :attr:`input_tensor` or attr:`input_tensor_a` with given :attr:`grad_tensor`.)doc");
+
+    detail::bind_unary_backward(
+        module,
+        ttnn::floor_bw,
+        R"doc(Performs backward operations for floor on :attr:`input_tensor` or :attr:`input_tensor_a` with given :attr:`grad_tensor`.)doc");
 
     detail::bind_unary_backward(
         module,
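
Usage note (outside the patch proper): the updated test exercises the new entry point as ttnn.floor_bw(grad_tensor, input_tensor). A minimal sketch of the same flow follows; it assumes an open tt-metal device handle plus the data_gen_with_range and compare_pcc helpers imported in the test above, whose exact signatures are assumptions rather than verified API. Since floor has zero derivative almost everywhere, the op is expected to return a zeros-like gradient for the input.

# Sketch only, not part of the patch; mirrors the updated test_backward_floor.py.
# Helper signatures (data_gen_with_range, compare_pcc) are assumptions.
import torch
import ttnn
from tests.tt_eager.python_api_testing.unit_testing.backward_ops.utility_funcs import (
    compare_pcc,
    data_gen_with_range,
)


def floor_bw_example(device):
    # Host reference tensors plus device tensors: input (autograd enabled) and gradient.
    in_data, input_tensor = data_gen_with_range([1, 1, 32, 32], -100, 100, device, True)
    grad_data, grad_tensor = data_gen_with_range([1, 1, 32, 32], -10, 10, device)

    # New TTNN entry point from this patch: floor_bw(grad, input) -> list of gradient tensors.
    tt_output = ttnn.floor_bw(grad_tensor, input_tensor)

    # PyTorch golden: floor has zero derivative almost everywhere, so the gradient
    # propagated back to the input is a zeros-like tensor.
    in_data.retain_grad()
    torch.floor(in_data).backward(gradient=grad_data)

    # PCC comparison of the device result against the golden gradient.
    return compare_pcc(tt_output, [in_data.grad])

The argument order (gradient first, then the forward input) and the list return follow the same unary-backward convention as the other ttnn *_bw ops bound in unary_backward_pybind.hpp.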