#10079: Merge softsign_bw to TTNN
mouliraj-mcw committed Jul 12, 2024
1 parent 98644fe commit 906159e
Showing 11 changed files with 80 additions and 41 deletions.
1 change: 1 addition & 0 deletions docs/source/ttnn/ttnn/api.rst
@@ -219,6 +219,7 @@ Pointwise Unary
ttnn/log1p_bw
ttnn/erfc_bw
ttnn/ceil_bw
ttnn/softsign_bw

Pointwise Binary
================
2 changes: 0 additions & 2 deletions docs/source/ttnn/ttnn/dependencies/tt_lib.rst
@@ -858,8 +858,6 @@ Backward Operations

.. autofunction:: tt_lib.tensor.logiteps_bw

.. autofunction:: tt_lib.tensor.softsign_bw

.. autofunction:: tt_lib.tensor.sign_bw

.. autofunction:: tt_lib.tensor.log2_bw
6 changes: 6 additions & 0 deletions docs/source/ttnn/ttnn/ttnn/softsign_bw.rst
@@ -0,0 +1,6 @@
.. _ttnn.softsign_bw:

ttnn.softsign_bw
#################

.. autofunction:: ttnn.softsign_bw
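For orientation, a minimal host-side sketch of how the newly documented op can be called from Python. Everything around the ttnn.softsign_bw call (device open/close, ttnn.from_torch/ttnn.to_torch, layout, shapes) is assumed typical ttnn usage and is not part of this commit.

import torch
import ttnn

device = ttnn.open_device(device_id=0)                      # assumed device-management helper
x = torch.randn(1, 1, 32, 32, dtype=torch.bfloat16)
g = torch.randn(1, 1, 32, 32, dtype=torch.bfloat16)
input_tensor = ttnn.from_torch(x, layout=ttnn.TILE_LAYOUT, device=device)
grad_tensor = ttnn.from_torch(g, layout=ttnn.TILE_LAYOUT, device=device)
result = ttnn.softsign_bw(grad_tensor, input_tensor)        # backward op added in this commit
grad_input = ttnn.to_torch(result[0])                       # assuming the op returns a list of tensors
ttnn.close_device(device)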
@@ -4,8 +4,8 @@

import torch
import pytest
import tt_lib
from tests.tt_eager.python_api_testing.unit_testing.backward_ops.utility_funcs import compare_pcc, data_gen_with_range
import ttnn
from tests.ttnn.unit_tests.operations.backward.utility_funcs import data_gen_with_range, compare_pcc


@pytest.mark.parametrize(
@@ -21,7 +21,7 @@ def test_bw_softsign(input_shapes, device):
in_data, input_tensor = data_gen_with_range(input_shapes, -100, 100, device, True)

pyt_y = torch.nn.functional.softsign(in_data)
tt_output_tensor_on_device = tt_lib.tensor.softsign_bw(grad_tensor, input_tensor)
tt_output_tensor_on_device = ttnn.softsign_bw(grad_tensor, input_tensor)

in_data.retain_grad()

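The hunk ends before the comparison step, so here is a self-contained sketch of the golden computation the test checks against; it uses plain torch in place of the repository's compare_pcc helper, and the shape, seed, and tolerances are illustrative assumptions.

import torch

torch.manual_seed(0)
x = torch.empty(1, 1, 32, 32).uniform_(-100, 100).requires_grad_()
g = torch.randn(1, 1, 32, 32)

y = torch.nn.functional.softsign(x)
y.backward(gradient=g)                                   # autograd golden gradient lands in x.grad

# closed form used by the device op: grad / (1 + |x|)^2
golden = g / torch.square(1.0 + torch.abs(x.detach()))
assert torch.allclose(x.grad, golden, rtol=1e-5, atol=1e-5)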
31 changes: 31 additions & 0 deletions tt_eager/tt_dnn/op_library/backward/backward_ops.cpp
@@ -892,6 +892,7 @@ std::vector<Tensor> logiteps_bw(
return operation::decorate_as_composite(__func__, _logiteps_bw)(grad, input, eps, output_mem_config);
}

<<<<<<< HEAD
// softsign
// result = grad_data / torch.square(1 + torch.abs(input))
std::vector<Tensor> _softsign_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
@@ -909,6 +910,36 @@ std::vector<Tensor> _softsign_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
}
std::vector<Tensor> softsign_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
return operation::decorate_as_composite(__func__, _softsign_bw)(grad, input, output_mem_config);
=======
std::vector<Tensor> _logit_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
std::vector<Tensor> grad_tensor;
Tensor grad_result =
ttnn::multiply(grad,
recip(ttnn::multiply(input, rsub(input, 1.0f, output_mem_config), std::nullopt, output_mem_config)),
std::nullopt,
output_mem_config);
Tensor status = ttnn::logical_and(
gte_unary(input, 0.0f, output_mem_config),
lte_unary(input, 1.0f, output_mem_config),
std::nullopt,
output_mem_config);
grad_result = where(
ttnn::eq(status, ones_like(input, output_mem_config), std::nullopt, output_mem_config), grad_result, std::nanf(""));
grad_result = where(
ttnn::logical_or(
eq_unary(input, 0.0, output_mem_config),
eq_unary(input, 1.0, output_mem_config),
std::nullopt,
output_mem_config),
mul_unary(sign(grad, output_mem_config), std::numeric_limits<float>::infinity(), output_mem_config),
grad_result,
output_mem_config);
grad_tensor.emplace_back(grad_result);
return grad_tensor;
}
std::vector<Tensor> logit_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
return operation::decorate_as_composite(__func__, _logit_bw)(grad, input, output_mem_config);
>>>>>>> #10079: Merge softsign_bw to TTNN
}

std::vector<Tensor> _sign_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
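For reference, the formula in the comment above follows directly from the forward definition: softsign(x) = x / (1 + |x|), so by the quotient rule d/dx softsign(x) = ((1 + |x|) - x * sign(x)) / (1 + |x|)^2 = 1 / (1 + |x|)^2, and the backward pass is therefore grad / (1 + |x|)^2 elementwise.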
5 changes: 0 additions & 5 deletions tt_eager/tt_dnn/op_library/backward/backward_ops.hpp
@@ -216,11 +216,6 @@ std::vector<Tensor> logiteps_bw(
float eps = 0.0f,
const MemoryConfig& output_mem_config = operation::DEFAULT_OUTPUT_MEMORY_CONFIG);

std::vector<Tensor> softsign_bw(
const Tensor& grad,
const Tensor& input,
const MemoryConfig& output_mem_config = operation::DEFAULT_OUTPUT_MEMORY_CONFIG);

std::vector<Tensor> sign_bw(
const Tensor& grad,
const Tensor& input,
16 changes: 0 additions & 16 deletions tt_eager/tt_lib/csrc/tt_lib_bindings_tensor_backward_ops.cpp
@@ -585,22 +585,6 @@ namespace tt::tt_metal::detail{
"output_mem_config", "Layout of tensor in TT Accelerator device memory banks", "MemoryConfig", "Default is interleaved in DRAM", "No"
)doc");

m_tensor.def("softsign_bw", &tt::tt_metal::softsign_bw,
py::arg("grad").noconvert(), py::arg("input").noconvert(), py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
Performs backward softsign operations on ``input`` tensors with given ``grad``.
Input tensors must have BFLOAT16 data type.
Output tensors will have BFLOAT16 data type.
.. csv-table::
:header: "Argument", "Description", "Data type", "Valid range", "Required"
"grad", "Gradient tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
"input", "Tensor softsign_bw is applied to", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
"output_mem_config", "Layout of tensor in TT Accelerator device memory banks", "MemoryConfig", "Default is interleaved in DRAM", "No"
)doc");

m_tensor.def("sign_bw", &tt::tt_metal::sign_bw,
py::arg("grad").noconvert(), py::arg("input").noconvert(), py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
Performs backward sign operations on ``input`` tensors with given ``grad``.
@@ -797,6 +797,22 @@ std::vector<Tensor> _ceil_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
return grad_tensor;
}

// softsign
// result = grad_data / torch.square(1 + torch.abs(input))
std::vector<Tensor> _softsign_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
std::vector<Tensor> grad_tensor;
using ttnn::operations::unary::UnaryWithParam;
using ttnn::operations::unary::UnaryOpType;
std::vector<UnaryWithParam> ops_chain = {
UnaryWithParam {UnaryOpType::ABS},
UnaryWithParam {UnaryOpType::ADD_UNARY_SFPU, 1.0f},
UnaryWithParam {UnaryOpType::SQUARE},
UnaryWithParam {UnaryOpType::RECIP}};
grad_tensor.emplace_back(
ttnn::multiply(grad, ttnn::unary_chain(input, ops_chain, output_mem_config), std::nullopt, output_mem_config));
return grad_tensor;
}

std::function<std::vector<ttnn::Tensor>(const Tensor&, const Tensor&, const MemoryConfig&)> UnaryBackwardFunction::get_function_type1(UnaryBackwardOpType OpType){
switch (OpType) {
case UnaryBackwardOpType::ASSIGN_BW:
@@ -879,6 +895,8 @@ std::function<std::vector<ttnn::Tensor>(const Tensor&, const Tensor&, const MemoryConfig&)> UnaryBackwardFunction::get_function_type1(UnaryBackwardOpType OpType){
return _erfc_bw;
case UnaryBackwardOpType::CEIL_BW:
return _ceil_bw;
case UnaryBackwardOpType::SOFTSIGN_BW:
return _softsign_bw;
default:
TT_ASSERT(false && "Undefined op type");
return 0;
@@ -66,6 +66,7 @@ enum class UnaryBackwardOpType {
LOG1P_BW,
ERFC_BW,
CEIL_BW,
SOFTSIGN_BW,
};

struct UnaryBackwardFunction{
@@ -118,5 +118,6 @@ constexpr auto log10_bw = ttnn::register_operation<operations::unary_backward::ExecuteUnaryBackward<operations::unary_backward::UnaryBackwardOpType::LOG10_BW>>("ttnn::log10_bw");
constexpr auto log1p_bw = ttnn::register_operation<operations::unary_backward::ExecuteUnaryBackward<operations::unary_backward::UnaryBackwardOpType::LOG1P_BW>>("ttnn::log1p_bw");
constexpr auto erfc_bw = ttnn::register_operation<operations::unary_backward::ExecuteUnaryBackward<operations::unary_backward::UnaryBackwardOpType::ERFC_BW>>("ttnn::erfc_bw");
constexpr auto ceil_bw = ttnn::register_operation<operations::unary_backward::ExecuteUnaryBackward<operations::unary_backward::UnaryBackwardOpType::CEIL_BW>>("ttnn::ceil_bw");
constexpr auto softsign_bw = ttnn::register_operation<operations::unary_backward::ExecuteUnaryBackward<operations::unary_backward::UnaryBackwardOpType::SOFTSIGN_BW>>("ttnn::softsign_bw");

} // namespace ttnn
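Taken together, the TTNN-side changes above wire the op up end to end: the new SOFTSIGN_BW enum value is mapped to _softsign_bw in UnaryBackwardFunction::get_function_type1, the register_operation line exposes it as ttnn::softsign_bw, and the bind_unary_backward call below makes it callable from Python as ttnn.softsign_bw.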
@@ -405,30 +405,34 @@ void py_module(py::module& module) {
R"doc(Performs backward operations for sin on :attr:`input_tensor` with given :attr:`grad_tensor`)doc");

detail::bind_unary_backward(
module,
ttnn::sinh_bw,
R"doc(Performs backward operations for sinh on :attr:`input_tensor` with given :attr:`grad_tensor`)doc");
module,
ttnn::sinh_bw,
R"doc(Performs backward operations for sinh on :attr:`input_tensor` with given :attr:`grad_tensor`)doc");

detail::bind_unary_backward(
module,
ttnn::log10_bw,
R"doc(Performs backward operations for log10 on :attr:`input_tensor` with given :attr:`grad_tensor`)doc");
module,
ttnn::log10_bw,
R"doc(Performs backward operations for log10 on :attr:`input_tensor` with given :attr:`grad_tensor`)doc");

detail::bind_unary_backward(
module,
ttnn::log1p_bw,
R"doc(Performs backward operations for log1p on :attr:`input_tensor` with given :attr:`grad_tensor`)doc");
module,
ttnn::log1p_bw,
R"doc(Performs backward operations for log1p on :attr:`input_tensor` with given :attr:`grad_tensor`)doc");

detail::bind_unary_backward(
module,
ttnn::erfc_bw,
R"doc(Performs backward operations for erfc on :attr:`input_tensor` with given :attr:`grad_tensor`)doc");
module,
ttnn::erfc_bw,
R"doc(Performs backward operations for erfc on :attr:`input_tensor` with given :attr:`grad_tensor`)doc");

detail::bind_unary_backward(
module,
ttnn::ceil_bw,
R"doc(Performs backward operations for ceil on :attr:`input_tensor` with given :attr:`grad_tensor`)doc");
module,
ttnn::ceil_bw,
R"doc(Performs backward operations for ceil on :attr:`input_tensor` with given :attr:`grad_tensor`)doc");

detail::bind_unary_backward(
module,
ttnn::softsign_bw,
R"doc(Performs backward operations for softsign on :attr:`input_tensor` with given :attr:`grad_tensor`)doc");

}

