diff --git a/docs/source/ttnn/ttnn/api.rst b/docs/source/ttnn/ttnn/api.rst
index 38a1729d718..0ec3893b2a3 100644
--- a/docs/source/ttnn/ttnn/api.rst
+++ b/docs/source/ttnn/ttnn/api.rst
@@ -213,6 +213,13 @@ Pointwise Unary
    ttnn/atanh_bw
    ttnn/asin_bw
    ttnn/asinh_bw
+   ttnn/sin_bw
+   ttnn/sinh_bw
+   ttnn/log10_bw
+   ttnn/log1p_bw
+   ttnn/erfc_bw
+   ttnn/ceil_bw
+   ttnn/softsign_bw
 
 Pointwise Binary
 ================
diff --git a/docs/source/ttnn/ttnn/dependencies/tt_lib.rst b/docs/source/ttnn/ttnn/dependencies/tt_lib.rst
index ab2cacd3c24..4dcfd19aad3 100644
--- a/docs/source/ttnn/ttnn/dependencies/tt_lib.rst
+++ b/docs/source/ttnn/ttnn/dependencies/tt_lib.rst
@@ -846,18 +846,8 @@ Backward Operations
 .. autofunction:: tt_lib.tensor.angle_bw
 
-.. autofunction:: tt_lib.tensor.sin_bw
-
-.. autofunction:: tt_lib.tensor.sinh_bw
-
-.. autofunction:: tt_lib.tensor.log10_bw
-
-.. autofunction:: tt_lib.tensor.log1p_bw
-
 .. autofunction:: tt_lib.tensor.erf_bw
 
-.. autofunction:: tt_lib.tensor.erfc_bw
-
 .. autofunction:: tt_lib.tensor.digamma_bw
 
 .. autofunction:: tt_lib.tensor.deg2rad_bw
@@ -868,12 +858,8 @@
 
 .. autofunction:: tt_lib.tensor.logiteps_bw
 
-.. autofunction:: tt_lib.tensor.softsign_bw
-
 .. autofunction:: tt_lib.tensor.sign_bw
 
-.. autofunction:: tt_lib.tensor.ceil_bw
-
 .. autofunction:: tt_lib.tensor.log2_bw
 
 .. autofunction:: tt_lib.tensor.ge_bw
diff --git a/docs/source/ttnn/ttnn/ttnn/ceil_bw.rst b/docs/source/ttnn/ttnn/ttnn/ceil_bw.rst
new file mode 100644
index 00000000000..a8d874c099d
--- /dev/null
+++ b/docs/source/ttnn/ttnn/ttnn/ceil_bw.rst
@@ -0,0 +1,6 @@
+.. _ttnn.ceil_bw:
+
+ttnn.ceil_bw
+#############
+
+ .. autofunction:: ttnn.ceil_bw
diff --git a/docs/source/ttnn/ttnn/ttnn/erfc_bw.rst b/docs/source/ttnn/ttnn/ttnn/erfc_bw.rst
new file mode 100644
index 00000000000..f9921c484de
--- /dev/null
+++ b/docs/source/ttnn/ttnn/ttnn/erfc_bw.rst
@@ -0,0 +1,6 @@
+.. _ttnn.erfc_bw:
+
+ttnn.erfc_bw
+#############
+
+ .. autofunction:: ttnn.erfc_bw
diff --git a/docs/source/ttnn/ttnn/ttnn/log10_bw.rst b/docs/source/ttnn/ttnn/ttnn/log10_bw.rst
new file mode 100644
index 00000000000..380c94f3ef4
--- /dev/null
+++ b/docs/source/ttnn/ttnn/ttnn/log10_bw.rst
@@ -0,0 +1,6 @@
+.. _ttnn.log10_bw:
+
+ttnn.log10_bw
+##############
+
+ .. autofunction:: ttnn.log10_bw
diff --git a/docs/source/ttnn/ttnn/ttnn/log1p_bw.rst b/docs/source/ttnn/ttnn/ttnn/log1p_bw.rst
new file mode 100644
index 00000000000..75fca1ff106
--- /dev/null
+++ b/docs/source/ttnn/ttnn/ttnn/log1p_bw.rst
@@ -0,0 +1,7 @@
+
+.. _ttnn.log1p_bw:
+
+ttnn.log1p_bw
+##############
+
+ .. autofunction:: ttnn.log1p_bw
diff --git a/docs/source/ttnn/ttnn/ttnn/sin_bw.rst b/docs/source/ttnn/ttnn/ttnn/sin_bw.rst
new file mode 100644
index 00000000000..e42d1e14fc9
--- /dev/null
+++ b/docs/source/ttnn/ttnn/ttnn/sin_bw.rst
@@ -0,0 +1,6 @@
+.. _ttnn.sin_bw:
+
+ttnn.sin_bw
+############
+
+ .. autofunction:: ttnn.sin_bw
diff --git a/docs/source/ttnn/ttnn/ttnn/sinh_bw.rst b/docs/source/ttnn/ttnn/ttnn/sinh_bw.rst
new file mode 100644
index 00000000000..cff33237575
--- /dev/null
+++ b/docs/source/ttnn/ttnn/ttnn/sinh_bw.rst
@@ -0,0 +1,6 @@
+.. _ttnn.sinh_bw:
+
+ttnn.sinh_bw
+#############
+
+ .. autofunction:: ttnn.sinh_bw
diff --git a/docs/source/ttnn/ttnn/ttnn/softsign_bw.rst b/docs/source/ttnn/ttnn/ttnn/softsign_bw.rst
new file mode 100644
index 00000000000..fb21229a7d5
--- /dev/null
+++ b/docs/source/ttnn/ttnn/ttnn/softsign_bw.rst
@@ -0,0 +1,6 @@
+.. _ttnn.softsign_bw:
+
+ttnn.softsign_bw
+#################
+
+ .. autofunction:: ttnn.softsign_bw
diff --git a/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_ceil.py b/tests/ttnn/unit_tests/operations/backward/test_backward_ceil.py
similarity index 77%
rename from tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_ceil.py
rename to tests/ttnn/unit_tests/operations/backward/test_backward_ceil.py
index 6568898663e..a78769b9fac 100644
--- a/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_ceil.py
+++ b/tests/ttnn/unit_tests/operations/backward/test_backward_ceil.py
@@ -4,8 +4,8 @@
 
 import torch
 import pytest
-import tt_lib
-from tests.tt_eager.python_api_testing.unit_testing.backward_ops.utility_funcs import compare_pcc, data_gen_with_range
+import ttnn
+from tests.ttnn.unit_tests.operations.backward.utility_funcs import data_gen_with_range, compare_pcc
 
 
 @pytest.mark.parametrize(
@@ -21,7 +21,7 @@ def test_bw_ceil(input_shapes, device):
     in_data, input_tensor = data_gen_with_range(input_shapes, -100, 100, device, True)
     pyt_y = torch.ceil(in_data)
 
-    tt_output_tensor_on_device = tt_lib.tensor.ceil_bw(grad_tensor, input_tensor)
+    tt_output_tensor_on_device = ttnn.ceil_bw(grad_tensor, input_tensor)
 
     in_data.retain_grad()
diff --git a/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_erfc.py b/tests/ttnn/unit_tests/operations/backward/test_backward_erfc.py
similarity index 77%
rename from tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_erfc.py
rename to tests/ttnn/unit_tests/operations/backward/test_backward_erfc.py
index ded1df758d7..3dcb1912655 100644
--- a/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_erfc.py
+++ b/tests/ttnn/unit_tests/operations/backward/test_backward_erfc.py
@@ -4,8 +4,8 @@
 
 import torch
 import pytest
-import tt_lib
-from tests.tt_eager.python_api_testing.unit_testing.backward_ops.utility_funcs import compare_pcc, data_gen_with_range
+import ttnn
+from tests.ttnn.unit_tests.operations.backward.utility_funcs import data_gen_with_range, compare_pcc
 
 
 @pytest.mark.parametrize(
@@ -21,7 +21,7 @@ def test_bw_erfc(input_shapes, device):
     in_data, input_tensor = data_gen_with_range(input_shapes, -200, 199, device, required_grad=True)
     pyt_y = torch.erfc(in_data)
 
-    tt_output_tensor_on_device = tt_lib.tensor.erfc_bw(grad_tensor, input_tensor)
+    tt_output_tensor_on_device = ttnn.erfc_bw(grad_tensor, input_tensor)
 
     in_data.retain_grad()
diff --git a/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_log10.py b/tests/ttnn/unit_tests/operations/backward/test_backward_log10.py
similarity index 76%
rename from tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_log10.py
rename to tests/ttnn/unit_tests/operations/backward/test_backward_log10.py
index 62dee085209..ac9ba2eba70 100644
--- a/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_log10.py
+++ b/tests/ttnn/unit_tests/operations/backward/test_backward_log10.py
@@ -4,11 +4,8 @@
 
 import torch
 import pytest
-import tt_lib
-from tests.tt_eager.python_api_testing.unit_testing.backward_ops.utility_funcs import (
-    compare_pcc,
-    data_gen_with_range,
-)
+import ttnn
+from tests.ttnn.unit_tests.operations.backward.utility_funcs import data_gen_with_range, compare_pcc
 
 
 @pytest.mark.parametrize(
@@ -23,7 +20,7 @@ def test_bw_log10(input_shapes, device):
     in_data, input_tensor = data_gen_with_range(input_shapes, -100, 100, device, True)
     grad_data, grad_tensor = data_gen_with_range(input_shapes, -10, 10, device)
 
-    tt_output_tensor_on_device = tt_lib.tensor.log10_bw(grad_tensor, input_tensor)
+    tt_output_tensor_on_device = ttnn.log10_bw(grad_tensor, input_tensor)
 
     in_data.retain_grad()
diff --git a/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_log1p.py b/tests/ttnn/unit_tests/operations/backward/test_backward_log1p.py
similarity index 76%
rename from tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_log1p.py
rename to tests/ttnn/unit_tests/operations/backward/test_backward_log1p.py
index 117c2892207..d3d8cddb769 100644
--- a/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_log1p.py
+++ b/tests/ttnn/unit_tests/operations/backward/test_backward_log1p.py
@@ -4,11 +4,8 @@
 
 import torch
 import pytest
-import tt_lib
-from tests.tt_eager.python_api_testing.unit_testing.backward_ops.utility_funcs import (
-    compare_pcc,
-    data_gen_with_range,
-)
+import ttnn
+from tests.ttnn.unit_tests.operations.backward.utility_funcs import data_gen_with_range, compare_pcc
 
 
 @pytest.mark.parametrize(
@@ -23,7 +20,7 @@ def test_bw_log1p(input_shapes, device):
     in_data, input_tensor = data_gen_with_range(input_shapes, -100, 100, device, True)
     grad_data, grad_tensor = data_gen_with_range(input_shapes, -10, 10, device)
 
-    tt_output_tensor_on_device = tt_lib.tensor.log1p_bw(grad_tensor, input_tensor)
+    tt_output_tensor_on_device = ttnn.log1p_bw(grad_tensor, input_tensor)
 
     in_data.retain_grad()
diff --git a/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_sin.py b/tests/ttnn/unit_tests/operations/backward/test_backward_sin.py
similarity index 78%
rename from tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_sin.py
rename to tests/ttnn/unit_tests/operations/backward/test_backward_sin.py
index 432c56cc2d4..3a0b9058208 100644
--- a/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_sin.py
+++ b/tests/ttnn/unit_tests/operations/backward/test_backward_sin.py
@@ -4,8 +4,8 @@
 
 import torch
 import pytest
-import tt_lib
-from tests.tt_eager.python_api_testing.unit_testing.backward_ops.utility_funcs import data_gen_with_range, compare_pcc
+import ttnn
+from tests.ttnn.unit_tests.operations.backward.utility_funcs import data_gen_with_range, compare_pcc
 from math import pi
 
 
@@ -21,7 +21,7 @@ def test_bw_sin(input_shapes, device):
     in_data, input_tensor = data_gen_with_range(input_shapes, 0, 2 * pi, device, True)
     grad_data, grad_tensor = data_gen_with_range(input_shapes, -10, 10, device, False)
 
-    tt_output_tensor_on_device = tt_lib.tensor.sin_bw(grad_tensor, input_tensor)
+    tt_output_tensor_on_device = ttnn.sin_bw(grad_tensor, input_tensor)
 
     in_data.retain_grad()
diff --git a/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_sinh.py b/tests/ttnn/unit_tests/operations/backward/test_backward_sinh.py
similarity index 85%
rename from tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_sinh.py
rename to tests/ttnn/unit_tests/operations/backward/test_backward_sinh.py
index 9d3366b928b..689d2b01449 100644
--- a/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_sinh.py
+++ b/tests/ttnn/unit_tests/operations/backward/test_backward_sinh.py
@@ -4,11 +4,9 @@
 
 import torch
 import pytest
-import tt_lib
-from tests.tt_eager.python_api_testing.unit_testing.backward_ops.utility_funcs import (
-    data_gen_with_range,
-    compare_pcc,
-)
+import ttnn
+from tests.ttnn.unit_tests.operations.backward.utility_funcs import data_gen_with_range, compare_pcc
+
 from models.utility_functions import (
     skip_for_wormhole_b0,
 )
@@ -26,7 +24,7 @@ def test_bw_sinh(input_shapes, device):
     in_data, input_tensor = data_gen_with_range(input_shapes, -9, 9, device, True)
     grad_data, grad_tensor = data_gen_with_range(input_shapes, -20, 20, device)
 
-    tt_output_tensor_on_device = tt_lib.tensor.sinh_bw(grad_tensor, input_tensor)
+    tt_output_tensor_on_device = ttnn.sinh_bw(grad_tensor, input_tensor)
 
     in_data.retain_grad()
@@ -52,7 +50,7 @@ def test_bw_sinh_inf(input_shapes, device):
     in_data, input_tensor = data_gen_with_range(input_shapes, 89, 96, device, True)
     grad_data, grad_tensor = data_gen_with_range(input_shapes, -10, 10, device)
 
-    tt_output_tensor_on_device = tt_lib.tensor.sinh_bw(grad_tensor, input_tensor)
+    tt_output_tensor_on_device = ttnn.sinh_bw(grad_tensor, input_tensor)
 
     in_data.retain_grad()
@@ -78,7 +76,7 @@ def test_bw_sinh_neg_inf(input_shapes, device):
     in_data, input_tensor = data_gen_with_range(input_shapes, -97, -89, device, True)
     grad_data, grad_tensor = data_gen_with_range(input_shapes, -10, 10, device)
 
-    tt_output_tensor_on_device = tt_lib.tensor.sinh_bw(grad_tensor, input_tensor)
+    tt_output_tensor_on_device = ttnn.sinh_bw(grad_tensor, input_tensor)
 
     in_data.retain_grad()
@@ -101,7 +99,7 @@ def test_bw_sinh_nan_test1(input_shapes, device):
     in_data, input_tensor = data_gen_with_range(input_shapes, 86, 89, device, True)
     grad_data, grad_tensor = data_gen_with_range(input_shapes, 35, 50, device)
 
-    tt_output_tensor_on_device = tt_lib.tensor.sinh_bw(grad_tensor, input_tensor)
+    tt_output_tensor_on_device = ttnn.sinh_bw(grad_tensor, input_tensor)
 
     in_data.retain_grad()
@@ -124,7 +122,7 @@ def test_bw_sinh_nan_test2(input_shapes, device):
     in_data, input_tensor = data_gen_with_range(input_shapes, 86, 89, device, True)
     grad_data, grad_tensor = data_gen_with_range(input_shapes, -50, -35, device)
 
-    tt_output_tensor_on_device = tt_lib.tensor.sinh_bw(grad_tensor, input_tensor)
+    tt_output_tensor_on_device = ttnn.sinh_bw(grad_tensor, input_tensor)
 
     in_data.retain_grad()
diff --git a/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_softsign.py b/tests/ttnn/unit_tests/operations/backward/test_backward_softsign.py
similarity index 77%
rename from tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_softsign.py
rename to tests/ttnn/unit_tests/operations/backward/test_backward_softsign.py
index 62aec548baf..d96759e6390 100644
--- a/tests/tt_eager/python_api_testing/unit_testing/backward_ops/test_backward_softsign.py
+++ b/tests/ttnn/unit_tests/operations/backward/test_backward_softsign.py
@@ -4,8 +4,8 @@
 
 import torch
 import pytest
-import tt_lib
-from tests.tt_eager.python_api_testing.unit_testing.backward_ops.utility_funcs import compare_pcc, data_gen_with_range
+import ttnn
+from tests.ttnn.unit_tests.operations.backward.utility_funcs import data_gen_with_range, compare_pcc
 
 
 @pytest.mark.parametrize(
@@ -21,7 +21,7 @@ def test_bw_softsign(input_shapes, device):
     in_data, input_tensor = data_gen_with_range(input_shapes, -100, 100, device, True)
     pyt_y = torch.nn.functional.softsign(in_data)
 
-    tt_output_tensor_on_device = tt_lib.tensor.softsign_bw(grad_tensor, input_tensor)
+    tt_output_tensor_on_device = ttnn.softsign_bw(grad_tensor, input_tensor)
 
     in_data.retain_grad()
diff --git a/tt_eager/tt_dnn/op_library/backward/backward_ops.cpp b/tt_eager/tt_dnn/op_library/backward/backward_ops.cpp
index d6e4edaaeba..86e0fbc0e0a 100644
--- a/tt_eager/tt_dnn/op_library/backward/backward_ops.cpp
+++ b/tt_eager/tt_dnn/op_library/backward/backward_ops.cpp
@@ -474,49 +474,6 @@ std::vector<Tensor> hardtanh_bw(
     return operation::decorate_as_composite(__func__, _hardtanh_bw)(grad, input, min, max, output_mem_config);
 }
 
-// name: sin(Tensor self) -> Tensor
-// self: grad * self.cos()
-std::vector<Tensor> _sin_bw(const Tensor& grad, const Tensor& input_tensor, const MemoryConfig& output_mem_config) {
-    std::vector<Tensor> grad_tensor;
-    Tensor grad_input = ttnn::multiply(grad, ttnn::cos(input_tensor, output_mem_config), std::nullopt, output_mem_config);
-    grad_tensor.emplace_back(grad_input);
-    return grad_tensor;
-}
-std::vector<Tensor> sin_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
-    return operation::decorate_as_composite(__func__, _sin_bw)(grad, input, output_mem_config);
-}
-
-// name: sinh(Tensor self) -> Tensor
-// self: grad * self.cosh()
-std::vector<Tensor> _sinh_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
-    std::vector<Tensor> grad_tensor;
-    Tensor t_inf = ttnn::multiply(ttnn::sign(grad, output_mem_config), std::numeric_limits<float>::infinity(), std::nullopt, output_mem_config);
-    Tensor grad_a = where(
-        ttnn::gt(input, full_like(input, 88.5, output_mem_config), std::nullopt, output_mem_config),
-        t_inf,
-        where(
-            ttnn::lt(input, full_like(input, -88.5, output_mem_config), std::nullopt, output_mem_config),
-            t_inf,
-            ttnn::multiply(grad, cosh(input, output_mem_config), std::nullopt, output_mem_config),
-            output_mem_config),
-        output_mem_config);
-    t_inf.deallocate();
-    grad_a = where(
-        ttnn::ge(grad_a, 3.4e+38, std::nullopt, output_mem_config),
-        std::numeric_limits<float>::infinity(),
-        where(
-            ttnn::le(grad_a, -3.4e+38, std::nullopt, output_mem_config),
-            -std::numeric_limits<float>::infinity(),
-            grad_a,
-            output_mem_config),
-        output_mem_config);
-    grad_tensor.emplace_back(grad_a);
-    return grad_tensor;
-}
-std::vector<Tensor> sinh_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
-    return operation::decorate_as_composite(__func__, _sinh_bw)(grad, input, output_mem_config);
-}
-
 // erfinv
 // self: 0.5 * sqrt(M_PI) * exp(self.erfinv().pow(2)) * grad
 // for input -1 and 1: grad.sign() * inf, for input > 1 or < -1 : nan
@@ -557,56 +514,6 @@ std::vector<Tensor> erfinv_bw(const Tensor& grad, const Tensor& input, const Mem
     return operation::decorate_as_composite(__func__, _erfinv_bw)(grad, input, output_mem_config);
 }
 
-// bw(log10(in)) = grad/(in * 2.30258509299404568402)
-std::vector<Tensor> _log10_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
-    std::vector<Tensor> grad_tensor;
-    Tensor t_inf = where(
-        ttnn::ltz(grad, output_mem_config),
-        -std::numeric_limits<float>::infinity(),
-        std::numeric_limits<float>::infinity(),
-        output_mem_config);
-    Tensor grad_a = ttnn::multiply(
-        grad, ttnn::reciprocal(ttnn::multiply(input, M_LN10, std::nullopt, output_mem_config), output_mem_config), std::nullopt, output_mem_config);
-    grad_a = where(
-        ttnn::logical_and(ttnn::eqz(input, output_mem_config), ttnn::eqz(grad, output_mem_config), std::nullopt, output_mem_config),
-        std::nanf(" "),
-        where(ttnn::eqz(input, output_mem_config), t_inf, grad_a, output_mem_config),
-        output_mem_config);
-    grad_tensor.emplace_back(grad_a);
-    return grad_tensor;
-}
-std::vector<Tensor> log10_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
-    return operation::decorate_as_composite(__func__, _log10_bw)(grad, input, output_mem_config);
-}
-
-// bw(log1p(in)) = grad/(in + 1)
-// for -1 = inf
-std::vector<Tensor> _log1p_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
-    std::vector<Tensor> grad_tensor;
-    Tensor t_inf = where(
-        ttnn::ltz(grad, output_mem_config),
-        -std::numeric_limits<float>::infinity(),
-        std::numeric_limits<float>::infinity(),
-        output_mem_config);
-    Tensor t_inp1 = ttnn::add(input, 1.0f, std::nullopt, output_mem_config);
-    Tensor grad_a = ttnn::multiply(grad, ttnn::reciprocal(t_inp1, output_mem_config), std::nullopt, output_mem_config);
-    grad_a = where(
-        ttnn::eq(input, full_like(input, -1.0, output_mem_config), std::nullopt, output_mem_config),
-        t_inf,
-        grad_a,
-        output_mem_config);
-    grad_a = where(
-        ttnn::logical_and(ttnn::eqz(t_inp1, output_mem_config), ttnn::eqz(grad, output_mem_config)),
-        std::nanf(" "),
-        grad_a,
-        output_mem_config);
-    grad_tensor.emplace_back(grad_a);
-    return grad_tensor;
-}
-std::vector<Tensor> log1p_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
-    return operation::decorate_as_composite(__func__, _log1p_bw)(grad, input, output_mem_config);
-}
-
 std::vector<Tensor> _erf_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
     std::vector<Tensor> grad_tensor;
     Tensor result = ttnn::multiply(
@@ -624,23 +531,6 @@ std::vector<Tensor> erf_bw(const Tensor& grad, const Tensor& input, const Memory
     return operation::decorate_as_composite(__func__, _erf_bw)(grad, input, output_mem_config);
 }
 
-std::vector<Tensor> _erfc_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
-    std::vector<Tensor> grad_tensor;
-    Tensor result = ttnn::multiply(
-        ttnn::multiply(ttnn::exp(ttnn::neg(ttnn::square(input, output_mem_config), output_mem_config), false, output_mem_config),
-            grad,
-            std::nullopt,
-            output_mem_config),
-        -M_2_SQRTPI,
-        std::nullopt,
-        output_mem_config);
-    grad_tensor.emplace_back(result);
-    return grad_tensor;
-}
-std::vector<Tensor> erfc_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
-    return operation::decorate_as_composite(__func__, _erfc_bw)(grad, input, output_mem_config);
-}
-
 std::vector<Tensor> _digamma_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
     std::vector<Tensor> grad_tensor;
     float t_inf = std::numeric_limits<float>::infinity();
@@ -926,25 +816,6 @@ std::vector<Tensor> logiteps_bw(
     return operation::decorate_as_composite(__func__, _logiteps_bw)(grad, input, eps, output_mem_config);
 }
 
-// softsign
-// result = grad_data / torch.square(1 + torch.abs(input))
-std::vector<Tensor> _softsign_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
-    std::vector<Tensor> grad_tensor;
-    using ttnn::operations::unary::UnaryWithParam;
-    using ttnn::operations::unary::UnaryOpType;
-    std::vector<UnaryWithParam> ops_chain = {
-        UnaryWithParam{UnaryOpType::ABS},
-        UnaryWithParam{UnaryOpType::ADD_UNARY_SFPU, 1.0f},
-        UnaryWithParam{UnaryOpType::SQUARE},
-        UnaryWithParam{UnaryOpType::RECIP}};
-    grad_tensor.emplace_back(
-        ttnn::multiply(grad, ttnn::unary_chain(input, ops_chain, output_mem_config), std::nullopt, output_mem_config));
-    return grad_tensor;
-}
-std::vector<Tensor> softsign_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
-    return operation::decorate_as_composite(__func__, _softsign_bw)(grad, input, output_mem_config);
-}
-
 std::vector<Tensor> _sign_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
     std::vector<Tensor> grad_tensor;
     Tensor zero_grad = zeros_like(grad, output_mem_config);
@@ -955,16 +826,6 @@ std::vector<Tensor> sign_bw(const Tensor& grad, const Tensor& input, const Memor
     return operation::decorate_as_composite(__func__, _sign_bw)(grad, input, output_mem_config);
 }
 
-std::vector<Tensor> _ceil_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
-    std::vector<Tensor> grad_tensor;
-    Tensor zero_grad = zeros_like(grad, output_mem_config);
-    grad_tensor.emplace_back(zero_grad);
-    return grad_tensor;
-}
-std::vector<Tensor> ceil_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
-    return operation::decorate_as_composite(__func__, _ceil_bw)(grad, input, output_mem_config);
-}
-
 // bw(log2(in)) = grad/(in * 0.69314718055994530942)
 std::vector<Tensor> _log2_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
     std::vector<Tensor> grad_tensor;
diff --git a/tt_eager/tt_dnn/op_library/backward/backward_ops.hpp b/tt_eager/tt_dnn/op_library/backward/backward_ops.hpp
index 8473c2436bf..8e1845e4a7b 100644
--- a/tt_eager/tt_dnn/op_library/backward/backward_ops.hpp
+++ b/tt_eager/tt_dnn/op_library/backward/backward_ops.hpp
@@ -176,36 +176,11 @@ std::vector<Tensor> angle_bw(
     bool is_complextensor = true,
     const MemoryConfig& output_mem_config = operation::DEFAULT_OUTPUT_MEMORY_CONFIG);
 
-std::vector<Tensor> sin_bw(
-    const Tensor& grad,
-    const Tensor& input,
-    const MemoryConfig& output_mem_config = operation::DEFAULT_OUTPUT_MEMORY_CONFIG);
-
-std::vector<Tensor> sinh_bw(
-    const Tensor& grad,
-    const Tensor& input,
-    const MemoryConfig& output_mem_config = operation::DEFAULT_OUTPUT_MEMORY_CONFIG);
-
-std::vector<Tensor> log10_bw(
-    const Tensor& grad,
-    const Tensor& input,
-    const MemoryConfig& output_mem_config = operation::DEFAULT_OUTPUT_MEMORY_CONFIG);
-
-std::vector<Tensor> log1p_bw(
-    const Tensor& grad,
-    const Tensor& input,
-    const MemoryConfig& output_mem_config = operation::DEFAULT_OUTPUT_MEMORY_CONFIG);
-
 std::vector<Tensor> erf_bw(
     const Tensor& grad,
     const Tensor& input,
     const MemoryConfig& output_mem_config = operation::DEFAULT_OUTPUT_MEMORY_CONFIG);
 
-std::vector<Tensor> erfc_bw(
-    const Tensor& grad,
-    const Tensor& input,
-    const MemoryConfig& output_mem_config = operation::DEFAULT_OUTPUT_MEMORY_CONFIG);
-
 std::vector<Tensor> digamma_bw(
     const Tensor& grad,
     const Tensor& input,
@@ -241,21 +216,11 @@ std::vector<Tensor> logiteps_bw(
     float eps = 0.0f,
     const MemoryConfig& output_mem_config = operation::DEFAULT_OUTPUT_MEMORY_CONFIG);
 
-std::vector<Tensor> softsign_bw(
-    const Tensor& grad,
-    const Tensor& input,
-    const MemoryConfig& output_mem_config = operation::DEFAULT_OUTPUT_MEMORY_CONFIG);
-
 std::vector<Tensor> sign_bw(
    const Tensor& grad,
    const Tensor& input,
    const MemoryConfig& output_mem_config = operation::DEFAULT_OUTPUT_MEMORY_CONFIG);
 
-std::vector<Tensor> ceil_bw(
-    const Tensor& grad,
-    const Tensor& input,
-    const MemoryConfig& output_mem_config = operation::DEFAULT_OUTPUT_MEMORY_CONFIG);
-
 std::vector<Tensor> log2_bw(
     const Tensor& grad,
     const Tensor& input,
diff --git a/tt_eager/tt_lib/csrc/tt_lib_bindings_tensor_backward_ops.cpp b/tt_eager/tt_lib/csrc/tt_lib_bindings_tensor_backward_ops.cpp
index b581ff66593..998273c95d8 100644
--- a/tt_eager/tt_lib/csrc/tt_lib_bindings_tensor_backward_ops.cpp
+++ b/tt_eager/tt_lib/csrc/tt_lib_bindings_tensor_backward_ops.cpp
@@ -451,39 +451,6 @@ namespace tt::tt_metal::detail{
             "output_mem_config", "Layout of tensor in TT Accelerator device memory banks", "MemoryConfig", "Default is interleaved in DRAM", "No"
         )doc");
 
-    m_tensor.def("sin_bw", &tt::tt_metal::sin_bw,
-            py::arg("grad").noconvert(), py::arg("input").noconvert(), py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
-        Performs backward operations for sin of the ``input`` with given ``grad``
-
-        Input tensors must have BFLOAT16 data type.
-
-        Output tensor will have BFLOAT16 data type.
-
-        .. csv-table::
-            :header: "Argument", "Description", "Data type", "Valid range", "Required"
-
-            "grad", "Gradient tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
-            "input", "Input Tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
-            "output_mem_config", "Layout of tensor in TT Accelerator device memory banks", "MemoryConfig", "Default is interleaved in DRAM", "No"
-        )doc");
-
-    m_tensor.def("sinh_bw", &tt::tt_metal::sinh_bw,
-            py::arg("grad").noconvert(), py::arg("input").noconvert(), py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
-        Performs backward operations for hyperbolic sin of ``input`` tensors with given ``grad``.
-
-        Input tensors must have BFLOAT16 data type.
-
-        Output tensors will have BFLOAT16 data type.
-
-        .. csv-table::
-            :header: "Argument", "Description", "Data type", "Valid range", "Required"
-
-            "grad", "Gradient tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
-            "input", "Input Tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
-            "output_mem_config", "Layout of tensor in TT Accelerator device memory banks", "MemoryConfig", "Default is interleaved in DRAM", "No"
-        )doc");
-
-
     m_tensor.def("erfinv_bw", &tt::tt_metal::erfinv_bw,
             py::arg("grad").noconvert(), py::arg("input").noconvert(), py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
         Performs backward operations for erfinv of ``input`` tensor with given ``grad``.
@@ -500,39 +467,6 @@ namespace tt::tt_metal::detail{
             "output_mem_config", "Layout of tensor in TT Accelerator device memory banks", "MemoryConfig", "Default is interleaved in DRAM", "No"
         )doc");
 
-    m_tensor.def("log10_bw", &tt::tt_metal::log10_bw,
-            py::arg("grad").noconvert(), py::arg("input").noconvert(), py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
-        Performs backward operations for log10 of ``input`` tensors with given ``grad``.
-
-        Input tensors must have BFLOAT16 data type.
-
-        Output tensors will have BFLOAT16 data type.
-
-        .. csv-table::
-            :header: "Argument", "Description", "Data type", "Valid range", "Required"
-
-            "grad", "Gradient tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
-            "input", "Input Tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
-            "output_mem_config", "Layout of tensor in TT Accelerator device memory banks", "MemoryConfig", "Default is interleaved in DRAM", "No"
-        )doc");
-
-
-    m_tensor.def("log1p_bw", &tt::tt_metal::log1p_bw,
-            py::arg("grad").noconvert(), py::arg("input").noconvert(), py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
-        Performs backward operations for log1p of ``input`` tensors with given ``grad``.
-
-        Input tensors must have BFLOAT16 data type.
-
-        Output tensors will have BFLOAT16 data type.
-
-        .. csv-table::
-            :header: "Argument", "Description", "Data type", "Valid range", "Required"
-
-            "grad", "Gradient tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
-            "input", "Input Tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
-            "output_mem_config", "Layout of tensor in TT Accelerator device memory banks", "MemoryConfig", "Default is interleaved in DRAM", "No"
-        )doc");
-
     m_tensor.def("erf_bw", &tt::tt_metal::erf_bw,
             py::arg("grad").noconvert(), py::arg("input").noconvert(), py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
         Performs backward operations for erf of ``input`` tensor with given ``grad``.
@@ -549,22 +483,6 @@ namespace tt::tt_metal::detail{
             "output_mem_config", "Layout of tensor in TT Accelerator device memory banks", "MemoryConfig", "Default is interleaved in DRAM", "No"
         )doc");
 
-    m_tensor.def("erfc_bw", &tt::tt_metal::erfc_bw,
-            py::arg("grad").noconvert(), py::arg("input").noconvert(), py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
-        Performs backward operations for erfc of ``input`` tensor with given ``grad``.
-
-        Input tensors must have BFLOAT16 data type.
-
-        Output tensors will have BFLOAT16 data type.
-
-        .. csv-table::
-            :header: "Argument", "Description", "Data type", "Valid range", "Required"
-
-            "grad", "Gradient tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
-            "input", "Input Tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
-            "output_mem_config", "Layout of tensor in TT Accelerator device memory banks", "MemoryConfig", "Default is interleaved in DRAM", "No"
-        )doc");
-
     m_tensor.def("digamma_bw", &tt::tt_metal::digamma_bw,
             py::arg("grad").noconvert(), py::arg("input").noconvert(), py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
         Performs backward operations for digamma for the ``input`` with given ``grad``
@@ -667,22 +585,6 @@ namespace tt::tt_metal::detail{
             "output_mem_config", "Layout of tensor in TT Accelerator device memory banks", "MemoryConfig", "Default is interleaved in DRAM", "No"
         )doc");
 
-    m_tensor.def("softsign_bw", &tt::tt_metal::softsign_bw,
-            py::arg("grad").noconvert(), py::arg("input").noconvert(), py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
-        Performs backward softsign operations on ``input`` tensors with given ``grad``.
-
-        Input tensors must have BFLOAT16 data type.
-
-        Output tensors will have BFLOAT16 data type.
-
-        .. csv-table::
-            :header: "Argument", "Description", "Data type", "Valid range", "Required"
-
-            "grad", "Gradient tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
-            "input", "Tensor softsign_bw is applied to", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
-            "output_mem_config", "Layout of tensor in TT Accelerator device memory banks", "MemoryConfig", "Default is interleaved in DRAM", "No"
-        )doc");
-
     m_tensor.def("sign_bw", &tt::tt_metal::sign_bw,
             py::arg("grad").noconvert(), py::arg("input").noconvert(), py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
         Performs backward sign operations on ``input`` tensors with given ``grad``.
@@ -699,22 +601,6 @@ namespace tt::tt_metal::detail{
             "output_mem_config", "Layout of tensor in TT Accelerator device memory banks", "MemoryConfig", "Default is interleaved in DRAM", "No"
         )doc");
 
-    m_tensor.def("ceil_bw", &tt::tt_metal::ceil_bw,
-            py::arg("grad").noconvert(), py::arg("input").noconvert(), py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
-        Performs backward ceil operations on ``input`` tensors with given ``grad``.
-
-        Input tensors must have BFLOAT16 data type.
-
-        Output tensors will have BFLOAT16 data type.
-
-        .. csv-table::
-            :header: "Argument", "Description", "Data type", "Valid range", "Required"
-
-            "grad", "Gradient tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
-            "input", "Tensor ceil_bw is applied to", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
-            "output_mem_config", "Layout of tensor in TT Accelerator device memory banks", "MemoryConfig", "Default is interleaved in DRAM", "No"
-        )doc");
-
     m_tensor.def("log2_bw", &tt::tt_metal::log2_bw,
             py::arg("grad").noconvert(), py::arg("input").noconvert(), py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
         Performs backward operations for log2 of ``input`` tensors with given ``grad``.
diff --git a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/device/unary_backward_op.cpp b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/device/unary_backward_op.cpp
index 1d1b2be641d..dee0e3a1693 100644
--- a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/device/unary_backward_op.cpp
+++ b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/device/unary_backward_op.cpp
@@ -695,6 +695,124 @@ std::vector<Tensor> _asinh_bw(const Tensor& grad, const Tensor& input, const Mem
     return grad_tensor;
 }
 
+// name: sin(Tensor self) -> Tensor
+// self: grad * self.cos()
+std::vector<Tensor> _sin_bw(const Tensor& grad, const Tensor& input_tensor, const MemoryConfig& output_mem_config) {
+    std::vector<Tensor> grad_tensor;
+    Tensor grad_input = ttnn::multiply(grad, ttnn::cos(input_tensor, output_mem_config), std::nullopt, output_mem_config);
+    grad_tensor.emplace_back(grad_input);
+    return grad_tensor;
+}
+
+// name: sinh(Tensor self) -> Tensor
+// self: grad * self.cosh()
+std::vector<Tensor> _sinh_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
+    std::vector<Tensor> grad_tensor;
+    Tensor t_inf = ttnn::multiply(ttnn::sign(grad, output_mem_config), std::numeric_limits<float>::infinity(), std::nullopt, output_mem_config);
+    Tensor grad_a = where(
+        ttnn::gt(input, ttnn::operations::creation::full_like(input, 88.5), std::nullopt, output_mem_config),
+        t_inf,
+        where(
+            ttnn::lt(input, ttnn::operations::creation::full_like(input, -88.5), std::nullopt, output_mem_config),
+            t_inf,
+            ttnn::multiply(grad, cosh(input, output_mem_config), std::nullopt, output_mem_config),
+            output_mem_config),
+        output_mem_config);
+    t_inf.deallocate();
+    grad_a = where(
+        ttnn::ge(grad_a, 3.4e+38, std::nullopt, output_mem_config),
+        std::numeric_limits<float>::infinity(),
+        where(
+            ttnn::le(grad_a, -3.4e+38, std::nullopt, output_mem_config),
+            -std::numeric_limits<float>::infinity(),
+            grad_a,
+            output_mem_config),
+        output_mem_config);
+    grad_tensor.emplace_back(grad_a);
+    return grad_tensor;
+}
+
+// bw(log10(in)) = grad/(in * 2.30258509299404568402)
+std::vector<Tensor> _log10_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
+    std::vector<Tensor> grad_tensor;
+    Tensor t_inf = where(
+        ttnn::ltz(grad, output_mem_config),
+        -std::numeric_limits<float>::infinity(),
+        std::numeric_limits<float>::infinity(),
+        output_mem_config);
+    Tensor grad_a = ttnn::multiply(
+        grad, ttnn::reciprocal(ttnn::multiply(input, M_LN10, std::nullopt, output_mem_config), output_mem_config), std::nullopt, output_mem_config);
+    grad_a = where(
+        ttnn::logical_and(ttnn::eqz(input, output_mem_config), ttnn::eqz(grad, output_mem_config), std::nullopt, output_mem_config),
+        std::nanf(" "),
+        where(ttnn::eqz(input, output_mem_config), t_inf, grad_a, output_mem_config),
+        output_mem_config);
+    grad_tensor.emplace_back(grad_a);
+    return grad_tensor;
+}
+
+// bw(log1p(in)) = grad/(in + 1)
+// for -1 = inf
+std::vector<Tensor> _log1p_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
+    std::vector<Tensor> grad_tensor;
+    Tensor t_inf = where(
+        ttnn::ltz(grad, output_mem_config),
+        -std::numeric_limits<float>::infinity(),
+        std::numeric_limits<float>::infinity(),
+        output_mem_config);
+    Tensor t_inp1 = ttnn::add(input, 1.0f, std::nullopt, output_mem_config);
+    Tensor grad_a = ttnn::multiply(grad, ttnn::reciprocal(t_inp1, output_mem_config), std::nullopt, output_mem_config);
+    grad_a = where(
+        ttnn::eq(input, ttnn::operations::creation::full_like(input, -1.0), std::nullopt, output_mem_config),
+        t_inf,
+        grad_a,
+        output_mem_config);
+    grad_a = where(
+        ttnn::logical_and(ttnn::eqz(t_inp1, output_mem_config), eqz(grad, output_mem_config)),
+        std::nanf(" "),
+        grad_a,
+        output_mem_config);
+    grad_tensor.emplace_back(grad_a);
+    return grad_tensor;
+}
+
+std::vector<Tensor> _erfc_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
+    std::vector<Tensor> grad_tensor;
+    Tensor result = ttnn::multiply(
+        ttnn::multiply(ttnn::exp(ttnn::neg(ttnn::square(input, output_mem_config), output_mem_config), false, output_mem_config),
+            grad,
+            std::nullopt,
+            output_mem_config),
+        -M_2_SQRTPI,
+        std::nullopt,
+        output_mem_config);
+    grad_tensor.emplace_back(result);
+    return grad_tensor;
+}
+
+std::vector<Tensor> _ceil_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
+    std::vector<Tensor> grad_tensor;
+    Tensor zero_grad = ttnn::operations::creation::zeros_like(grad);
+    grad_tensor.emplace_back(zero_grad);
+    return grad_tensor;
+}
+
+// softsign
+// result = grad_data / torch.square(1 + torch.abs(input))
+std::vector<Tensor> _softsign_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
+    std::vector<Tensor> grad_tensor;
+    using ttnn::operations::unary::UnaryWithParam;
+    using ttnn::operations::unary::UnaryOpType;
+    std::vector<UnaryWithParam> ops_chain = {
+        UnaryWithParam {UnaryOpType::ABS},
+        UnaryWithParam {UnaryOpType::ADD_UNARY_SFPU, 1.0f},
+        UnaryWithParam {UnaryOpType::SQUARE},
+        UnaryWithParam {UnaryOpType::RECIP}};
+    grad_tensor.emplace_back(
+        ttnn::multiply(grad, ttnn::unary_chain(input, ops_chain, output_mem_config), std::nullopt, output_mem_config));
+    return grad_tensor;
+}
+
 std::function<std::vector<Tensor>(const Tensor&, const Tensor&, const MemoryConfig&)> UnaryBackwardFunction::get_function_type1(UnaryBackwardOpType OpType){
     switch (OpType) {
         case UnaryBackwardOpType::ASSIGN_BW:
@@ -765,6 +883,20 @@ std::function<std::vector<Tensor>(const Tensor&, const Tensor&, const Memo
             return _asin_bw;
         case UnaryBackwardOpType::ASINH_BW:
             return _asinh_bw;
+        case UnaryBackwardOpType::SIN_BW:
+            return _sin_bw;
+        case UnaryBackwardOpType::SINH_BW:
+            return _sinh_bw;
+        case UnaryBackwardOpType::LOG10_BW:
+            return _log10_bw;
+        case UnaryBackwardOpType::LOG1P_BW:
+            return _log1p_bw;
+        case UnaryBackwardOpType::ERFC_BW:
+            return _erfc_bw;
+        case UnaryBackwardOpType::CEIL_BW:
+            return _ceil_bw;
+        case UnaryBackwardOpType::SOFTSIGN_BW:
+            return _softsign_bw;
         default:
             TT_ASSERT(false && "Undefined op type");
             return 0;
diff --git a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/device/unary_backward_op.hpp b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/device/unary_backward_op.hpp
index 0b0d9e02aed..cf7578ccc96 100644
--- a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/device/unary_backward_op.hpp
+++ b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/device/unary_backward_op.hpp
@@ -59,7 +59,14 @@ enum class UnaryBackwardOpType {
     TANHSHRINK_BW,
     ATANH_BW,
     ASIN_BW,
-    ASINH_BW
+    ASINH_BW,
+    SIN_BW,
+    SINH_BW,
+    LOG10_BW,
+    LOG1P_BW,
+    ERFC_BW,
+    CEIL_BW,
+    SOFTSIGN_BW,
 };
 
 struct UnaryBackwardFunction{
diff --git a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward.hpp b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward.hpp
index 21790f1aa76..c0017b02078 100644
--- a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward.hpp
+++ b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward.hpp
@@ -112,5 +112,12 @@ constexpr auto tanhshrink_bw = ttnn::register_operation
 constexpr auto atanh_bw = ttnn::register_operation>("ttnn::atanh_bw");
 constexpr auto asin_bw = ttnn::register_operation>("ttnn::asin_bw");
 constexpr auto asinh_bw = ttnn::register_operation>("ttnn::asinh_bw");
+constexpr auto sin_bw = ttnn::register_operation>("ttnn::sin_bw");
+constexpr auto sinh_bw = ttnn::register_operation>("ttnn::sinh_bw");
+constexpr auto log10_bw = ttnn::register_operation>("ttnn::log10_bw");
+constexpr auto log1p_bw = ttnn::register_operation>("ttnn::log1p_bw");
+constexpr auto erfc_bw = ttnn::register_operation>("ttnn::erfc_bw");
+constexpr auto ceil_bw = ttnn::register_operation>("ttnn::ceil_bw");
+constexpr auto softsign_bw = ttnn::register_operation>("ttnn::softsign_bw");
 
 }  // namespace ttnn
diff --git a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward_pybind.hpp b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward_pybind.hpp
index 48e07b0db61..3f26c92e9e3 100644
--- a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward_pybind.hpp
+++ b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward_pybind.hpp
@@ -398,6 +398,41 @@ void py_module(py::module& module) {
         module,
         ttnn::asinh_bw,
         R"doc(Performs backward operations for asinh on :attr:`input_tensor` with given :attr:`grad_tensor`)doc");
+
+    detail::bind_unary_backward(
+        module,
+        ttnn::sin_bw,
+        R"doc(Performs backward operations for sin on :attr:`input_tensor` with given :attr:`grad_tensor`)doc");
+
+    detail::bind_unary_backward(
+        module,
+        ttnn::sinh_bw,
+        R"doc(Performs backward operations for sinh on :attr:`input_tensor` with given :attr:`grad_tensor`)doc");
+
+    detail::bind_unary_backward(
+        module,
+        ttnn::log10_bw,
+        R"doc(Performs backward operations for log10 on :attr:`input_tensor` with given :attr:`grad_tensor`)doc");
+
+    detail::bind_unary_backward(
+        module,
+        ttnn::log1p_bw,
+        R"doc(Performs backward operations for log1p on :attr:`input_tensor` with given :attr:`grad_tensor`)doc");
+
+    detail::bind_unary_backward(
+        module,
+        ttnn::erfc_bw,
+        R"doc(Performs backward operations for erfc on :attr:`input_tensor` with given :attr:`grad_tensor`)doc");
+
+    detail::bind_unary_backward(
+        module,
+        ttnn::ceil_bw,
+        R"doc(Performs backward operations for ceil on :attr:`input_tensor` with given :attr:`grad_tensor`)doc");
+
+    detail::bind_unary_backward(
+        module,
+        ttnn::softsign_bw,
+        R"doc(Performs backward operations for softsign on :attr:`input_tensor` with given :attr:`grad_tensor`)doc");
 }
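Note: the migrated tests above now call the new entry points through the ttnn Python module instead of tt_lib.tensor. A minimal sketch of that call pattern for one op follows, assuming a device handle and the data_gen_with_range / compare_pcc helpers imported by the tests; the golden-comparison tail is not shown in the hunks above, so that part is sketched from the repository's usual backward-test pattern and is an assumption here.

    import torch
    from math import pi

    import ttnn
    from tests.ttnn.unit_tests.operations.backward.utility_funcs import data_gen_with_range, compare_pcc


    def run_sin_bw_check(input_shapes, device):
        # Random inputs in [0, 2*pi] with requires_grad=True, plus a gradient tensor (as in test_bw_sin).
        in_data, input_tensor = data_gen_with_range(input_shapes, 0, 2 * pi, device, True)
        grad_data, grad_tensor = data_gen_with_range(input_shapes, -10, 10, device, False)

        # New ttnn entry point (previously tt_lib.tensor.sin_bw).
        tt_output_tensor_on_device = ttnn.sin_bw(grad_tensor, input_tensor)

        # Golden reference via torch autograd; assumed tail, mirroring existing backward tests.
        in_data.retain_grad()
        pyt_y = torch.sin(in_data)
        pyt_y.backward(gradient=grad_data)
        golden_tensor = [in_data.grad]

        return compare_pcc(tt_output_tensor_on_device, golden_tensor)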