From 49a688797474752181e75421213e9077a0e0a973 Mon Sep 17 00:00:00 2001
From: Bharane AB
Date: Thu, 7 Mar 2024 13:07:24 +0000
Subject: [PATCH] #6136: Add backward support for unary LE and GE

---
 docs/source/ttnn/dependencies/tt_lib.rst | 4 +++
 .../backward_ops/test_backward_ge.py | 28 +++++++++++++++++
 .../backward_ops/test_backward_le.py | 28 +++++++++++++++++
 .../op_library/backward/backward_ops.cpp | 23 ++++++++++++++
 .../op_library/backward/backward_ops.hpp | 3 ++
 .../tt_lib_bindings_tensor_backward_ops.cpp | 30 +++++++++++++++++++
 6 files changed, 116 insertions(+)
 create mode 100644 tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_ge.py
 create mode 100644 tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_le.py

diff --git a/docs/source/ttnn/dependencies/tt_lib.rst b/docs/source/ttnn/dependencies/tt_lib.rst
index e2b95fa916c..ce7ca7d0fef 100644
--- a/docs/source/ttnn/dependencies/tt_lib.rst
+++ b/docs/source/ttnn/dependencies/tt_lib.rst
@@ -1036,6 +1036,10 @@ Backward Operations
 
 .. autofunction:: tt_lib.tensor.log2_bw
 
+.. autofunction:: tt_lib.tensor.ge_bw
+
+.. autofunction:: tt_lib.tensor.le_bw
+
 Loss Functions
 ==============
 
diff --git a/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_ge.py b/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_ge.py
new file mode 100644
index 00000000000..b592385ae4c
--- /dev/null
+++ b/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_ge.py
@@ -0,0 +1,28 @@
+# SPDX-FileCopyrightText: © 2023 Tenstorrent Inc.
+
+# SPDX-License-Identifier: Apache-2.0
+
+import torch
+import pytest
+import tt_lib
+from tests.tt_eager.python_api_testing.unit_testing.backward_ops.utility_funcs import data_gen_pt_tt, compare_results
+
+
+@pytest.mark.parametrize(
+    "input_shapes",
+    (
+        (torch.Size([1, 1, 32, 32])),
+        (torch.Size([1, 1, 320, 384])),
+        (torch.Size([1, 3, 320, 384])),
+    ),
+)
+def test_bw_unary_ge(input_shapes, device):
+    grad_data, grad_tensor = data_gen_pt_tt(input_shapes, device)
+    tt_output_tensor_on_device = tt_lib.tensor.ge_bw(grad_tensor)
+
+    pyt_y = torch.zeros_like(grad_data)
+
+    golden_tensor = [pyt_y]
+
+    comp_pass = compare_results(tt_output_tensor_on_device, golden_tensor)
+    assert comp_pass
diff --git a/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_le.py b/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_le.py
new file mode 100644
index 00000000000..87ad3a9d677
--- /dev/null
+++ b/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_le.py
@@ -0,0 +1,28 @@
+# SPDX-FileCopyrightText: © 2023 Tenstorrent Inc.
+
+# SPDX-License-Identifier: Apache-2.0
+
+import torch
+import pytest
+import tt_lib
+from tests.tt_eager.python_api_testing.unit_testing.backward_ops.utility_funcs import data_gen_pt_tt, compare_results
+
+
+@pytest.mark.parametrize(
+    "input_shapes",
+    (
+        (torch.Size([1, 1, 32, 32])),
+        (torch.Size([1, 1, 320, 384])),
+        (torch.Size([1, 3, 320, 384])),
+    ),
+)
+def test_bw_unary_le(input_shapes, device):
+    grad_data, grad_tensor = data_gen_pt_tt(input_shapes, device)
+    tt_output_tensor_on_device = tt_lib.tensor.le_bw(grad_tensor)
+
+    pyt_y = torch.zeros_like(grad_data)
+
+    golden_tensor = [pyt_y]
+
+    comp_pass = compare_results(tt_output_tensor_on_device, golden_tensor)
+    assert comp_pass
diff --git a/tt_eager/tt_dnn/op_library/backward/backward_ops.cpp b/tt_eager/tt_dnn/op_library/backward/backward_ops.cpp
index f3cde02cd67..9664c13a833 100644
--- a/tt_eager/tt_dnn/op_library/backward/backward_ops.cpp
+++ b/tt_eager/tt_dnn/op_library/backward/backward_ops.cpp
@@ -1528,6 +1528,29 @@ std::vector<Tensor> log2_bw(const Tensor& grad, const Tensor& input, const Memor
 {
     return operation::decorate_as_composite(__func__, _log2_bw)(grad, input, output_mem_config);
 }
 
+std::vector<Tensor> _ge_bw(const Tensor& grad, const MemoryConfig& output_mem_config) {
+    std::vector<Tensor> grad_tensor;
+    Tensor t_zero = zeros_like(grad, output_mem_config);
+    grad_tensor.emplace_back(t_zero);
+    return grad_tensor;
+}
+std::vector<Tensor> ge_bw(const Tensor& grad, const MemoryConfig& output_mem_config)
+{
+    return operation::decorate_as_composite(__func__, _ge_bw)(grad, output_mem_config);
+}
+
+
+std::vector<Tensor> _le_bw(const Tensor& grad, const MemoryConfig& output_mem_config) {
+    std::vector<Tensor> grad_tensor;
+    Tensor t_zero = zeros_like(grad, output_mem_config);
+    grad_tensor.emplace_back(t_zero);
+    return grad_tensor;
+}
+std::vector<Tensor> le_bw(const Tensor& grad, const MemoryConfig& output_mem_config)
+{
+    return operation::decorate_as_composite(__func__, _le_bw)(grad, output_mem_config);
+}
+
 }//namespace tt_metal
 }//namespace tt
diff --git a/tt_eager/tt_dnn/op_library/backward/backward_ops.hpp b/tt_eager/tt_dnn/op_library/backward/backward_ops.hpp
index 0c97b525978..d3ca1590401 100644
--- a/tt_eager/tt_dnn/op_library/backward/backward_ops.hpp
+++ b/tt_eager/tt_dnn/op_library/backward/backward_ops.hpp
@@ -233,6 +233,9 @@ std::vector<Tensor> logiteps_bw(const Tensor& grad, const Tensor& input, float e
 std::vector<Tensor> log2_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config = operation::DEFAULT_OUTPUT_MEMORY_CONFIG);
 
+std::vector<Tensor> ge_bw(const Tensor& grad, const MemoryConfig& output_mem_config = operation::DEFAULT_OUTPUT_MEMORY_CONFIG);
+
+std::vector<Tensor> le_bw(const Tensor& grad, const MemoryConfig& output_mem_config = operation::DEFAULT_OUTPUT_MEMORY_CONFIG);
 } //namespace tt_metal
 
 } //namespace tt
diff --git a/tt_eager/tt_lib/csrc/tt_lib_bindings_tensor_backward_ops.cpp b/tt_eager/tt_lib/csrc/tt_lib_bindings_tensor_backward_ops.cpp
index fe4d78e7c85..30e61a45f1d 100644
--- a/tt_eager/tt_lib/csrc/tt_lib_bindings_tensor_backward_ops.cpp
+++ b/tt_eager/tt_lib/csrc/tt_lib_bindings_tensor_backward_ops.cpp
@@ -1781,5 +1781,35 @@ namespace tt::tt_metal::detail{
                 "input", "Input Tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
                 "output_mem_config", "Layout of tensor in TT Accelerator device memory banks", "MemoryConfig", "Default is interleaved in DRAM", "No"
         )doc");
+
+        m_tensor.def("ge_bw", &tt::tt_metal::ge_bw,
+            py::arg("grad").noconvert(), py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
+            Returns a tensor of zeros like the ``grad`` tensor
+
+            Input tensor must have BFLOAT16 data type.
+
+            Output tensor will have BFLOAT16 data type.
+
+            .. csv-table::
+                :header: "Argument", "Description", "Data type", "Valid range", "Required"
+
+                "grad", "Gradient tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
+                "output_mem_config", "Layout of tensor in TT Accelerator device memory banks", "MemoryConfig", "Default is interleaved in DRAM", "No"
+        )doc");
+
+        m_tensor.def("le_bw", &tt::tt_metal::le_bw,
+            py::arg("grad").noconvert(), py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
+            Returns a tensor of zeros like the ``grad`` tensor
+
+            Input tensor must have BFLOAT16 data type.
+
+            Output tensor will have BFLOAT16 data type.
+
+            .. csv-table::
+                :header: "Argument", "Description", "Data type", "Valid range", "Required"
+
+                "grad", "Gradient tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
+                "output_mem_config", "Layout of tensor in TT Accelerator device memory banks", "MemoryConfig", "Default is interleaved in DRAM", "No"
+        )doc");
 }
 }