diff --git a/ttnn/cpp/ttnn/operations/eltwise/binary_backward/binary_backward_pybind.hpp b/ttnn/cpp/ttnn/operations/eltwise/binary_backward/binary_backward_pybind.hpp index 37b85742e3b..d327d5564b8 100644 --- a/ttnn/cpp/ttnn/operations/eltwise/binary_backward/binary_backward_pybind.hpp +++ b/ttnn/cpp/ttnn/operations/eltwise/binary_backward/binary_backward_pybind.hpp @@ -22,31 +22,24 @@ namespace binary_backward { namespace detail { template -void bind_binary_backward_ops(py::module& module, const binary_backward_operation_t& operation, std::string_view description, std::string_view supported_dtype) { +void bind_binary_backward_ops(py::module& module, const binary_backward_operation_t& operation, const std::string_view description, const std::string_view supported_dtype = "") { auto doc = fmt::format( R"doc( {2} - Args: - grad_tensor (ttnn.Tensor): the input tensor. + grad_tensor (ttnn.Tensor): the input gradient tensor. input_tensor_a (ttnn.Tensor): the input tensor. input_tensor_b (ttnn.Tensor): the input tensor. - Keyword args: memory_config (ttnn.MemoryConfig, optional): Memory configuration for the operation. Defaults to `None`. - Returns: List of ttnn.Tensor: the output tensor. 
- - Supported dtypes, layouts, and ranks: - - - {3} - + Note: + {3} Note : bfloat8_b/bfloat4_b is only supported on TILE_LAYOUT @@ -86,14 +79,13 @@ void bind_binary_backward_ops(py::module& module, const binary_backward_operatio } template -void bind_binary_backward_int_default(py::module& module, const binary_backward_operation_t& operation, const std::string& parameter_name, const std::string& parameter_doc, int parameter_value, std::string_view description, std::string_view supported_dtype) { +void bind_binary_backward_int_default(py::module& module, const binary_backward_operation_t& operation, const std::string& parameter_name, const std::string& parameter_doc, int parameter_value, const std::string_view description, const std::string_view supported_dtype = "") { auto doc = fmt::format( R"doc( {5} - Args: - grad_tensor (ttnn.Tensor): the input tensor. + grad_tensor (ttnn.Tensor): the input gradient tensor. input_tensor_a (ttnn.Tensor): the input tensor. input_tensor_b (ttnn.Tensor): the input tensor. {2} (int): {3}. Defaults to `{4}`. @@ -111,11 +103,8 @@ void bind_binary_backward_int_default(py::module& module, const binary_backward_ List of ttnn.Tensor: the output tensor. 
- Supported dtypes, layouts, and ranks: - - - {6} - + Note: + {6} Note : bfloat8_b/bfloat4_b is only supported on TILE_LAYOUT @@ -168,14 +157,14 @@ void bind_binary_backward_int_default(py::module& module, const binary_backward_ } template -void bind_binary_backward_opt_float_default(py::module& module, const binary_backward_operation_t& operation, const std::string& parameter_name, const std::string& parameter_doc, float parameter_value, std::string_view description, std::string_view supported_dtype) { +void bind_binary_backward_opt_float_default(py::module& module, const binary_backward_operation_t& operation, const std::string& parameter_name, const std::string& parameter_doc, float parameter_value, const std::string_view description, const std::string_view supported_dtype = "") { auto doc = fmt::format( R"doc( {5} Args: - grad_tensor (ttnn.Tensor): the input tensor. + grad_tensor (ttnn.Tensor): the input gradient tensor. input_tensor_a (ttnn.Tensor): the input tensor. input_tensor_b (ttnn.Tensor): the input tensor. {2} (float): {3}. Defaults to `{4}`. @@ -193,10 +182,8 @@ void bind_binary_backward_opt_float_default(py::module& module, const binary_bac List of ttnn.Tensor: the output tensor. - Supported dtypes, layouts, and ranks: - - {6} - + Note: + {6} Note : bfloat8_b/bfloat4_b is only supported on TILE_LAYOUT @@ -266,7 +253,7 @@ void bind_binary_backward_float_string_default( {7} Args: - grad_tensor (ttnn.Tensor): the input tensor. + grad_tensor (ttnn.Tensor): the input gradient tensor. input_tensor_a (ttnn.Tensor): the input tensor. input_tensor_b (ttnn.Tensor or Number): the input tensor. @@ -277,9 +264,8 @@ void bind_binary_backward_float_string_default( Returns: List of ttnn.Tensor: the output tensor. 
- Supported dtypes, layouts, and ranks: - - {8} + Note: + {8} Note : bfloat8_b/bfloat4_b is only supported on TILE_LAYOUT @@ -337,14 +323,14 @@ void bind_binary_backward_float_string_default( } template -void bind_binary_backward_sub_alpha(py::module& module, const binary_backward_operation_t& operation, const std::string& parameter_name, const std::string& parameter_doc, float parameter_value, std::string_view description, std::string_view supported_dtype) { +void bind_binary_backward_sub_alpha(py::module& module, const binary_backward_operation_t& operation, const std::string& parameter_name, const std::string& parameter_doc, float parameter_value, const std::string_view description, const std::string_view supported_dtype = "") { auto doc = fmt::format( R"doc( {5} Args: - grad_tensor (ttnn.Tensor): the input tensor. + grad_tensor (ttnn.Tensor): the input gradient tensor. input_tensor_a (ttnn.Tensor): the input tensor. input_tensor_b (ttnn.Tensor): the input tensor. {2} (float): {3}. Defaults to `{4}`. @@ -356,9 +342,8 @@ void bind_binary_backward_sub_alpha(py::module& module, const binary_backward_op other_grad (ttnn.Tensor, optional): Preallocated output tensor for gradient of `input_tensor_b`. Defaults to `None`. queue_id (int, optional): command queue id. Defaults to `0`. 
- Supported dtypes, layouts, and ranks: - - {6} + Note: + {6} Note : bfloat8_b/bfloat4_b is only supported on TILE_LAYOUT @@ -409,15 +394,15 @@ void bind_binary_backward_sub_alpha(py::module& module, const binary_backward_op } template -void bind_binary_backward_rsub(py::module& module, const binary_backward_operation_t& operation, std::string_view description, std::string_view supported_dtype) { +void bind_binary_backward_rsub(py::module& module, const binary_backward_operation_t& operation, const std::string_view description, const std::string_view supported_dtype = "") { auto doc = fmt::format( - R"doc({0}(grad_tensor: ttnn.Tensor, input_tensor_a: ttnn.Tensor, input_tensor_b: ttnn.Tensor, *, are_required_outputs: Optional[List[bool]] = [True, True], memory_config: ttnn.MemoryConfig) -> std::vector + R"doc( {2} Args: - grad_tensor (ttnn.Tensor): the input tensor. + grad_tensor (ttnn.Tensor): the input gradient tensor. input_tensor_a (ttnn.Tensor): the input tensor. input_tensor_b (ttnn.Tensor): the input tensor. @@ -428,9 +413,8 @@ void bind_binary_backward_rsub(py::module& module, const binary_backward_operati other_grad (ttnn.Tensor, optional): Preallocated output tensor for gradient of `input_tensor_b`. Defaults to `None`. queue_id (int, optional): command queue id. Defaults to `0`. - Supported dtypes, layouts, and ranks: - - {3} + Note: + {3} Note : bfloat8_b/bfloat4_b is only supported on TILE_LAYOUT @@ -476,13 +460,13 @@ void bind_binary_backward_rsub(py::module& module, const binary_backward_operati } template -void bind_binary_bw_mul(py::module& module, const binary_backward_operation_t& operation, std::string_view description, std::string_view supported_dtype) { +void bind_binary_bw_mul(py::module& module, const binary_backward_operation_t& operation, const std::string_view description, const std::string_view supported_dtype = "") { auto doc = fmt::format( R"doc( {2} Args: - grad_tensor (ComplexTensor or ttnn.Tensor): the input tensor. 
+ grad_tensor (ComplexTensor or ttnn.Tensor): the input gradient tensor. input_tensor_a (ComplexTensor or ttnn.Tensor): the input tensor. input_tensor_b (ComplexTensor or ttnn.Tensor or Number): the input tensor. @@ -496,9 +480,8 @@ void bind_binary_bw_mul(py::module& module, const binary_backward_operation_t& o Returns: List of ttnn.Tensor: the output tensor. - Supported dtypes, layouts, and ranks: - - {3} + Note: + {3} Note : bfloat8_b/bfloat4_b is only supported on TILE_LAYOUT @@ -578,7 +561,7 @@ void bind_binary_bw_mul(py::module& module, const binary_backward_operation_t& o template -void bind_binary_bw(py::module& module, const binary_backward_operation_t& operation, std::string_view description, std::string_view supported_dtype) { +void bind_binary_bw(py::module& module, const binary_backward_operation_t& operation, const std::string_view description, const std::string_view supported_dtype = "") { auto doc = fmt::format( R"doc( @@ -586,7 +569,7 @@ void bind_binary_bw(py::module& module, const binary_backward_operation_t& opera Supports broadcasting. Args: - grad_tensor (ComplexTensor or ttnn.Tensor): the input tensor. + grad_tensor (ComplexTensor or ttnn.Tensor): the input gradient tensor. input_tensor_a (ComplexTensor or ttnn.Tensor): the input tensor. input_tensor_b (ComplexTensor or ttnn.Tensor or Number): the input tensor. @@ -597,9 +580,8 @@ void bind_binary_bw(py::module& module, const binary_backward_operation_t& opera other_grad (ttnn.Tensor, optional): Preallocated output tensor for gradient of `input_tensor_b`. Defaults to `None`. queue_id (int, optional): command queue id. Defaults to `0`. 
- Supported dtypes, layouts, and ranks: - - {3} + Note: + {3} Note : bfloat8_b/bfloat4_b is only supported on TILE_LAYOUT @@ -680,14 +662,14 @@ void bind_binary_bw(py::module& module, const binary_backward_operation_t& opera } template -void bind_binary_bw_optional(py::module& module, const binary_backward_operation_t& operation, std::string_view description, std::string_view supported_dtype = "") { +void bind_binary_bw_optional(py::module& module, const binary_backward_operation_t& operation, const std::string_view description, const std::string_view supported_dtype = "") { auto doc = fmt::format( R"doc( {2} Args: - grad_tensor (ttnn.Tensor): the input tensor. + grad_tensor (ttnn.Tensor): the input gradient tensor. input_tensor (ttnn.Tensor): the input tensor. other_tensor (ttnn.Tensor or Number): the input tensor. @@ -698,9 +680,8 @@ void bind_binary_bw_optional(py::module& module, const binary_backward_operation other_grad (ttnn.Tensor, optional): Preallocated output tensor for gradient of `other_tensor`. Defaults to `None`. queue_id (int, optional): command queue id. Defaults to `0`. - Supported dtypes, layouts, and ranks: - - {3} + Note: + {3} Note : bfloat8_b/bfloat4_b is only supported on TILE_LAYOUT @@ -764,14 +745,14 @@ void bind_binary_bw_optional(py::module& module, const binary_backward_operation } template -void bind_binary_bw_div(py::module& module, const binary_backward_operation_t& operation, std::string_view description, std::string_view supported_dtype) { +void bind_binary_bw_div(py::module& module, const binary_backward_operation_t& operation, const std::string_view description, const std::string_view supported_dtype = "") { auto doc = fmt::format( R"doc( {2} Args: - grad_tensor (ComplexTensor or ttnn.Tensor): the input tensor. + grad_tensor (ComplexTensor or ttnn.Tensor): the input gradient tensor. input_tensor_a (ComplexTensor or ttnn.Tensor): the input tensor. input_tensor_b (ComplexTensor or ttnn.Tensor or Number): the input tensor. 
@@ -788,9 +769,8 @@ void bind_binary_bw_div(py::module& module, const binary_backward_operation_t& o Supports broadcasting. - Supported dtypes, layouts, and ranks: - - {3} + Note: + {3} Note : bfloat8_b/bfloat4_b is only supported on TILE_LAYOUT @@ -880,7 +860,7 @@ void bind_binary_backward_overload(py::module& module, const binary_backward_ope {2} Args: - grad_tensor (ttnn.Tensor): the input tensor. + grad_tensor (ttnn.Tensor): the input gradient tensor. input_tensor_a (ttnn.Tensor): the input tensor. input_tensor_b (ttnn.Tensor or Number): the input tensor. @@ -890,7 +870,7 @@ void bind_binary_backward_overload(py::module& module, const binary_backward_ope Returns: List of ttnn.Tensor: the output tensor. - Supported dtypes, layouts, and ranks: + Note: {3} Example: @@ -941,14 +921,14 @@ void bind_binary_backward_overload(py::module& module, const binary_backward_ope } template -void bind_binary_backward_assign(py::module& module, const binary_backward_operation_t& operation, std::string_view description, std::string_view supported_dtype) { +void bind_binary_backward_assign(py::module& module, const binary_backward_operation_t& operation, const std::string_view description, const std::string_view supported_dtype = "") { auto doc = fmt::format( R"doc( {2} Args: - grad_tensor (ttnn.Tensor): the input tensor. + grad_tensor (ttnn.Tensor): the input gradient tensor. input_tensor_a (ttnn.Tensor): the input tensor. input_tensor_b (ttnn.Tensor): the input tensor. @@ -960,7 +940,7 @@ void bind_binary_backward_assign(py::module& module, const binary_backward_opera queue_id (int, optional): command queue id. Defaults to `0`. round_mode (str, optional): Round mode for the operation. Defaults to `None`. 
- Supported dtypes, layouts, and ranks: + Note: {3} Example: @@ -1027,80 +1007,101 @@ void py_module(py::module& module) { module, ttnn::mul_bw, R"doc(Performs backward operations for multiply on :attr:`input_tensor_a`, :attr:`input_tensor_b`, with given :attr:`grad_tensor`.)doc", - R"doc( - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | ROW_MAJOR, TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc"); + R"doc(Supported dtypes, layouts, and ranks: + + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16, BFLOAT8_B | ROW_MAJOR, TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_binary_bw( module, ttnn::add_bw, R"doc(Performs backward operations for add of :attr:`input_tensor_a` and :attr:`input_tensor_b` or :attr:`scalar` with given :attr:`grad_tensor`.)doc", - R"doc( - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | ROW_MAJOR, TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc"); + R"doc(Supported dtypes, layouts, and ranks: + + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16, BFLOAT8_B | ROW_MAJOR, TILE | 2, 3, 4 | + 
+----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_binary_bw( module, ttnn::sub_bw, R"doc(Performs backward operations for subtract of :attr:`input_tensor_a` and :attr:`input_tensor_b` or :attr:`scalar` with given :attr:`grad_tensor`.)doc", - R"doc( - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | ROW_MAJOR, TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc"); + R"doc(Supported dtypes, layouts, and ranks: + + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16, BFLOAT8_B | ROW_MAJOR, TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_binary_bw_div( module, ttnn::div_bw, - R"doc(Performs backward operations for divide on :attr:`input_tensor`, :attr:`alpha` or :attr:`input_tensor_a`, attr:`input_tensor_b`, attr:`round_mode`, with given :attr:`grad_tensor`.)doc", - R"doc( - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | ROW_MAJOR, TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc"); + R"doc(Performs backward operations for divide on :attr:`input_tensor`, :attr:`alpha` or :attr:`input_tensor_a`, :attr:`input_tensor_b`, :attr:`round_mode`, with given :attr:`grad_tensor`.)doc", + R"doc(Supported dtypes, layouts, and ranks: + + 
+----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16, BFLOAT8_B | ROW_MAJOR, TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_binary_backward_overload( module, ttnn::remainder_bw, R"doc(Performs backward operations for remainder of :attr:`input_tensor_a`, :attr:`scalar` or :attr:`input_tensor_b` with given :attr:`grad_tensor`.)doc", - R"doc( - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | ROW_MAJOR, TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc"); + R"doc(Supported dtypes, layouts, and ranks: + + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16 | ROW_MAJOR, TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_binary_backward_overload( module, ttnn::fmod_bw, R"doc(Performs backward operations for fmod of :attr:`input_tensor_a`, :attr:`scalar` or :attr:`input_tensor_b` with given :attr:`grad_tensor`.)doc", - R"doc( - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | ROW_MAJOR, TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc"); + R"doc(Supported dtypes, layouts, and ranks: + + 
+----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16 | ROW_MAJOR, TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_binary_backward_assign( module, ttnn::assign_bw, R"doc(Performs backward operations for assign of :attr:`input_tensor_a`, :attr:`input_tensor_b` with given :attr:`grad_tensor`.)doc", - R"doc( - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | ROW_MAJOR, TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc"); + R"doc(Supported dtypes, layouts, and ranks: + + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16 | ROW_MAJOR, TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_binary_bw_optional( module, @@ -1111,147 +1112,186 @@ void py_module(py::module& module) { module, ttnn::atan2_bw, R"doc(Performs backward operations for atan2 of :attr:`input_tensor_a` and :attr:`input_tensor_b` with given :attr:`grad_tensor`.)doc", - R"doc( - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | ROW_MAJOR, TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc"); + R"doc(Supported dtypes, layouts, and ranks: + + 
+----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16, BFLOAT8_B | ROW_MAJOR, TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_binary_backward_sub_alpha( module, ttnn::subalpha_bw, "alpha", "Alpha value", 1.0f, R"doc(Performs backward operations for subalpha of :attr:`input_tensor_a` and :attr:`input_tensor_b` with given :attr:`grad_tensor`.)doc", - R"doc( - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | ROW_MAJOR, TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc"); + R"doc(Supported dtypes, layouts, and ranks: + + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16, BFLOAT8_B | ROW_MAJOR, TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_binary_backward_opt_float_default( module, ttnn::addalpha_bw, "alpha", "Alpha value", 1.0f, - R"doc(Performs backward operations for addalpha on :attr:`input_tensor_b` , :attr:`input_tensor_a` and :attr:`alpha` with given attr:`grad_tensor`.)doc", - R"doc( - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | ROW_MAJOR, TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc"); + R"doc(Performs 
backward operations for addalpha on :attr:`input_tensor_b` , :attr:`input_tensor_a` and :attr:`alpha` with given :attr:`grad_tensor`.)doc", + R"doc(Supported dtypes, layouts, and ranks: + + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16, BFLOAT8_B | ROW_MAJOR, TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_binary_backward_ops( module, ttnn::xlogy_bw, R"doc(Performs backward operations for xlogy of :attr:`input_tensor_a` and :attr:`input_tensor_b` with given :attr:`grad_tensor`.)doc", - R"doc( - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | ROW_MAJOR, TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc"); + R"doc(Supported dtypes, layouts, and ranks: + + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16, BFLOAT8_B | ROW_MAJOR, TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_binary_backward_ops( module, ttnn::hypot_bw, R"doc(Performs backward operations for hypot of :attr:`input_tensor_a` and :attr:`input_tensor_b` with given :attr:`grad_tensor`.)doc", - R"doc( - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | ROW_MAJOR, TILE | 2, 3, 4 | - 
+----------------------------+---------------------------------+-------------------+)doc"); + R"doc(Supported dtypes, layouts, and ranks: + + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16, BFLOAT8_B | ROW_MAJOR, TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_binary_backward_ops( module, ttnn::ldexp_bw, R"doc(Performs backward operations for ldexp of :attr:`input_tensor_a` and :attr:`input_tensor_b` with given :attr:`grad_tensor`.)doc", - R"doc( - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | ROW_MAJOR | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc"); + R"doc(Supported dtypes, layouts, and ranks: + + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16 | ROW_MAJOR | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_binary_backward_ops( module, ttnn::logaddexp_bw, R"doc(Performs backward operations for logaddexp of :attr:`input_tensor_a` and :attr:`input_tensor_b` with given :attr:`grad_tensor`.)doc", - R"doc( - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | ROW_MAJOR, TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc"); + 
R"doc(Supported dtypes, layouts, and ranks: + + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16, BFLOAT8_B | ROW_MAJOR, TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_binary_backward_ops( module, ttnn::logaddexp2_bw, R"doc(Performs backward operations for logaddexp2 of :attr:`input_tensor_a` and :attr:`input_tensor_b` with given :attr:`grad_tensor`.)doc", - R"doc( - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | ROW_MAJOR, TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc"); + R"doc(Supported dtypes, layouts, and ranks: + + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16, BFLOAT8_B | ROW_MAJOR, TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_binary_backward_ops( module, ttnn::squared_difference_bw, R"doc(Performs backward operations for squared_difference of :attr:`input_tensor_a` and :attr:`input_tensor_b` with given :attr:`grad_tensor`.)doc", - R"doc( - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | ROW_MAJOR, TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc"); + R"doc(Supported dtypes, layouts, and 
ranks: + + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16, BFLOAT8_B | ROW_MAJOR, TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_binary_backward_int_default( module, ttnn::concat_bw, "dim", "Dimension to concatenate", 0, R"doc(Performs backward operations for concat on :attr:`input_tensor_a` and :attr:`input_tensor_b` with given :attr:`grad_tensor`.)doc", - R"doc( - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | ROW_MAJOR, TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc"); + R"doc(Supported dtypes, layouts, and ranks: + + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16, BFLOAT8_B | ROW_MAJOR, TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_binary_backward_rsub( module, ttnn::rsub_bw, R"doc(Performs backward operations for subraction of :attr:`input_tensor_a` from :attr:`input_tensor_b` with given :attr:`grad_tensor` (reversed order of subtraction operator).)doc", - R"doc( - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | ROW_MAJOR, TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc"); + R"doc(Supported 
dtypes, layouts, and ranks: + + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16, BFLOAT8_B | ROW_MAJOR, TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_binary_backward_ops( module, ttnn::min_bw, R"doc(Performs backward operations for minimum of :attr:`input_tensor_a` and :attr:`input_tensor_b` with given :attr:`grad_tensor`.)doc", - R"doc( - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | ROW_MAJOR, TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc"); + R"doc(Supported dtypes, layouts, and ranks: + + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16, BFLOAT8_B | ROW_MAJOR, TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_binary_backward_ops( module, ttnn::max_bw, R"doc(Performs backward operations for maximum of :attr:`input_tensor_a` and :attr:`input_tensor_b` with given :attr:`grad_tensor`.)doc", - R"doc( - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | ROW_MAJOR, TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc"); + R"doc(Supported dtypes, layouts, and ranks: + + 
+----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16, BFLOAT8_B | ROW_MAJOR, TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_binary_backward_float_string_default( module, @@ -1263,12 +1303,15 @@ void py_module(py::module& module) { "none", R"doc(Performs backward operations for bias_gelu on :attr:`input_tensor_a` and :attr:`input_tensor_b` or :attr:`input_tensor` and :attr:`bias`, with given :attr:`grad_tensor` using given :attr:`approximate` mode. :attr:`approximate` mode can be 'none', 'tanh'.)doc", - R"doc( - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | ROW_MAJOR, TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc"); + R"doc(Supported dtypes, layouts, and ranks: + + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16, BFLOAT8_B | ROW_MAJOR, TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); } } // namespace binary_backward diff --git a/ttnn/cpp/ttnn/operations/eltwise/ternary_backward/ternary_backward_pybind.hpp b/ttnn/cpp/ttnn/operations/eltwise/ternary_backward/ternary_backward_pybind.hpp index 99154fe6c06..f17839c8461 100644 --- a/ttnn/cpp/ttnn/operations/eltwise/ternary_backward/ternary_backward_pybind.hpp +++ b/ttnn/cpp/ttnn/operations/eltwise/ternary_backward/ternary_backward_pybind.hpp @@ -20,18 +20,18 @@ namespace ternary_backward { namespace detail { 
template -void bind_ternary_backward(py::module& module, const ternary_backward_operation_t& operation, std::string_view description, std::string_view supported_dtype) { +void bind_ternary_backward(py::module& module, const ternary_backward_operation_t& operation, const std::string_view description, const std::string_view supported_dtype = "") { auto doc = fmt::format( R"doc( {2} Args: - grad_tensor (ttnn.Tensor): the input tensor. + grad_tensor (ttnn.Tensor): the input gradient tensor. input_tensor_a (ttnn.Tensor): the input tensor. input_tensor_b (ttnn.Tensor): the input tensor. input_tensor_c (ttnn.Tensor or Number): the input tensor. - alpha (float, nuber): the alpha value. + alpha (float): the alpha value. Keyword args: memory_config (ttnn.MemoryConfig, optional): Memory configuration for the operation. Defaults to `None`. @@ -39,7 +39,8 @@ void bind_ternary_backward(py::module& module, const ternary_backward_operation_ Returns: List of ttnn.Tensor: the output tensor. - {3} + Note: + {3} Example: @@ -51,8 +52,8 @@ void bind_ternary_backward(py::module& module, const ternary_backward_operation_ )doc", operation.base_name(), operation.python_fully_qualified_name(), - supported_dtype, - description); + description, + supported_dtype); bind_registered_operation( module, @@ -80,7 +81,7 @@ void bind_ternary_backward(py::module& module, const ternary_backward_operation_ } template -void bind_ternary_backward_op(py::module& module, const ternary_backward_operation_t& operation, std::string_view description, std::string_view supported_dtype) { +void bind_ternary_backward_op(py::module& module, const ternary_backward_operation_t& operation, const std::string_view description, const std::string_view supported_dtype = "") { auto doc = fmt::format( R"doc( {2} @@ -101,7 +102,8 @@ void bind_ternary_backward_op(py::module& module, const ternary_backward_operati List of ttnn.Tensor: the output tensor. 
- {3} + Note: + {3} Note : bfloat8_b/bfloat4_b is only supported on TILE_LAYOUT @@ -118,8 +120,8 @@ void bind_ternary_backward_op(py::module& module, const ternary_backward_operati operation.base_name(), operation.python_fully_qualified_name(), - supported_dtype, - description); + description, + supported_dtype); bind_registered_operation( module, @@ -160,7 +162,7 @@ void bind_ternary_backward_op(py::module& module, const ternary_backward_operati } template -void bind_ternary_backward_optional_output(py::module& module, const ternary_backward_operation_t& operation, std::string_view description, std::string_view supported_dtype) { +void bind_ternary_backward_optional_output(py::module& module, const ternary_backward_operation_t& operation, const std::string_view description, const std::string_view supported_dtype = "") { auto doc = fmt::format( R"doc( @@ -182,7 +184,8 @@ void bind_ternary_backward_optional_output(py::module& module, const ternary_bac List of ttnn.Tensor: the output tensor. 
- {3} + Note: + {3} Example: @@ -194,8 +197,8 @@ void bind_ternary_backward_optional_output(py::module& module, const ternary_bac )doc", operation.base_name(), operation.python_fully_qualified_name(), - supported_dtype, - description); + description, + supported_dtype); bind_registered_operation( module, @@ -233,62 +236,66 @@ void py_module(py::module& module) { detail::bind_ternary_backward( module, ttnn::addcmul_bw, + R"doc(Performs backward operations for addcmul of :attr:`input_tensor_a` , :attr:`input_tensor_b` and :attr:`input_tensor_c` with given :attr:`grad_tensor`.)doc", R"doc(Supported dtypes, layouts, and ranks: - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ - Note : bfloat8_b/bfloat4_b supports only on TILE_LAYOUT)doc", - R"doc(Performs backward operations for addcmul of :attr:`input_tensor_a` , :attr:`input_tensor_b` and :attr:`input_tensor_c` with given :attr:`grad_tensor`.)doc"); + )doc"); detail::bind_ternary_backward( module, ttnn::addcdiv_bw, + R"doc(Performs backward operations for addcdiv of :attr:`input_tensor_a` , :attr:`input_tensor_b` and :attr:`input_tensor_c` with given :attr:`grad_tensor`.)doc", R"doc(Supported dtypes, layouts, and ranks: - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - 
+----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations for addcdiv of :attr:`input_tensor_a` , :attr:`input_tensor_b` and :attr:`input_tensor_c` with given :attr:`grad_tensor`.)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16 | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_ternary_backward_optional_output( module, ttnn::where_bw, + R"doc(Performs backward operations for where of :attr:`input_tensor_a` , :attr:`input_tensor_b` and :attr:`input_tensor_c` with given :attr:`grad_tensor`.)doc", R"doc(Supported dtypes, layouts, and ranks: - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations for where of :attr:`input_tensor_a` , :attr:`input_tensor_b` and :attr:`input_tensor_c` with given :attr:`grad_tensor`.)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16 | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_ternary_backward_op( module, ttnn::lerp_bw, + R"doc(Performs backward operations for lerp of :attr:`input_tensor_a` , :attr:`input_tensor_b` and 
:attr:`input_tensor_c` or :attr:`scalar` with given :attr:`grad_tensor`.)doc", R"doc(Supported dtypes, layouts, and ranks: For Inputs : :attr:`input_tensor_a` , :attr:`input_tensor_b` and :attr:`input_tensor_c` - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16 | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ Supported dtypes, layouts, and ranks: For Inputs : :attr:`input_tensor_a` , :attr:`input_tensor_b` and :attr:`scalar` - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ - )doc", - R"doc(Performs backward operations for lerp of :attr:`input_tensor_a` , :attr:`input_tensor_b` and :attr:`input_tensor_c` or :attr:`scalar` with given :attr:`grad_tensor`.)doc"); + )doc"); } diff --git a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward_pybind.hpp 
b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward_pybind.hpp index 96390705cde..24252c47264 100644 --- a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward_pybind.hpp +++ b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward_pybind.hpp @@ -22,13 +22,14 @@ namespace detail { template void bind_unary_backward_two_float( - py::module& module, const unary_backward_operation_t& operation, std::string_view description, std::string_view supported_dtype) { + py::module& module, const unary_backward_operation_t& operation, const std::string_view description, const std::string_view supported_dtype = "") { auto doc = fmt::format( R"doc( + {2} Args: - grad_tensor (ttnn.Tensor): the input tensor. + grad_tensor (ttnn.Tensor): the input gradient tensor. input_tensor (ttnn.Tensor): the input tensor. threshold (float): the input threshold value. value (float): the input value. @@ -39,7 +40,8 @@ void bind_unary_backward_two_float( Returns: List of ttnn.Tensor: the output tensor. - {3} + Note: + {3} Example: @@ -49,8 +51,8 @@ void bind_unary_backward_two_float( )doc", operation.base_name(), operation.python_fully_qualified_name(), - supported_dtype, - description); + description, + supported_dtype); bind_registered_operation( module, @@ -75,13 +77,13 @@ void bind_unary_backward_two_float( template void bind_unary_backward_op( - py::module& module, const unary_backward_operation_t& operation, const std::string& description, const std::string& supported_dtype) { + py::module& module, const unary_backward_operation_t& operation, const std::string& description, const std::string& supported_dtype = "") { auto doc = fmt::format( R"doc( {2} Args: - grad_tensor (ttnn.Tensor): the input tensor. + grad_tensor (ttnn.Tensor): the input gradient tensor. input_tensor_a (ttnn.Tensor): the input tensor. Keyword args: @@ -90,7 +92,8 @@ void bind_unary_backward_op( Returns: List of ttnn.Tensor: the output tensor. 
- {3} + Note: + {3} Example: @@ -100,8 +103,8 @@ void bind_unary_backward_op( )doc", operation.base_name(), operation.python_fully_qualified_name(), - supported_dtype, - description); + description, + supported_dtype); bind_registered_operation( module, @@ -122,22 +125,23 @@ void bind_unary_backward_op( template void bind_unary_backward_rsqrt( - py::module& module, const unary_backward_operation_t& operation, std::string_view description, std::string_view supported_dtype) { + py::module& module, const unary_backward_operation_t& operation, const std::string_view description, const std::string_view supported_dtype = "") { auto doc = fmt::format( - R"doc({0}(grad_tensor: ttnn.Tensor, input_tensor: ttnn.Tensor, *, memory_config: ttnn.MemoryConfig) -> std::vector + R"doc( {2} Args: - * :attr:`grad_tensor` - * :attr:`input_tensor` + grad_tensor (ttnn.Tensor): the input gradient tensor. + input_tensor (ttnn.Tensor): the input tensor. Keyword args: - * :attr:`memory_config` [ttnn.MemoryConfig]: memory config for the output tensor - * :attr:`output_tensor` (Optional[ttnn.Tensor]): preallocated output tensor - * :attr:`queue_id` (Optional[uint8]): command queue id + memory_config (ttnn.MemoryConfig, optional): Memory configuration for the operation. Defaults to `None`. + output_tensor (ttnn.Tensor, optional): preallocated output tensor. Defaults to `None`. + queue_id (uint8, optional): command queue id. Defaults to `0`. - {3} + Note: + {3} Example: @@ -147,8 +151,8 @@ void bind_unary_backward_rsqrt( )doc", operation.base_name(), operation.python_fully_qualified_name(), - supported_dtype, - description); + description, + supported_dtype); bind_registered_operation( module, @@ -179,7 +183,7 @@ void bind_unary_backward_op_reciprocal( {2} Args: - grad_tensor (ComplexTensor or ttnn.Tensor): the input tensor. + grad_tensor (ComplexTensor or ttnn.Tensor): the input gradient tensor. input_tensor_a (ComplexTensor or ttnn.Tensor): the input tensor. 
Keyword args: @@ -229,13 +233,13 @@ void bind_unary_backward_op_reciprocal( template void bind_unary_backward_op_overload_abs( - py::module& module, const unary_backward_operation_t& operation, std::string_view description) { + py::module& module, const unary_backward_operation_t& operation, const std::string_view description) { auto doc = fmt::format( R"doc( {2} Args: - grad_tensor (ttnn.Tensor): the input tensor. + grad_tensor (ttnn.Tensor): the input gradient tensor. input_tensor (ComplexTensor or ttnn.Tensor): the input tensor. Keyword args: @@ -290,7 +294,7 @@ void bind_unary_backward_float(py::module& module, const unary_backward_operatio {2} Args: - grad_tensor (ttnn.Tensor): the input tensor. + grad_tensor (ttnn.Tensor): the input gradient tensor. input_tensor (ttnn.Tensor): the input tensor. float_value (Number): the input tensor. @@ -341,13 +345,13 @@ void bind_unary_backward_two_float_with_default( const std::string& parameter_name_b, const std::string& parameter_b_doc, float parameter_b_value, - std::string_view description) { + const std::string_view description) { auto doc = fmt::format( R"doc( {8} Args: - grad_tensor (ComplexTensor or ttnn.Tensor): the input tensor. + grad_tensor (ComplexTensor or ttnn.Tensor): the input gradient tensor. input_tensor (ComplexTensor or ttnn.Tensor): the input tensor. Keyword args: @@ -408,7 +412,7 @@ void bind_unary_backward_float_with_default( {5} Args: - grad_tensor (ttnn.Tensor): the input tensor. + grad_tensor (ttnn.Tensor): the input gradient tensor. input_tensor_a (ttnn.Tensor): the input tensor. @@ -467,7 +471,7 @@ void bind_unary_backward_optional_float_params_with_default( {8} Args: - grad_tensor (ttnn.Tensor): the input tensor. + grad_tensor (ttnn.Tensor): the input gradient tensor. input_tensor (ttnn.Tensor): the input tensor. 
Keyword args: @@ -524,19 +528,18 @@ void bind_unary_backward_float_string_default( const std::string& parameter_name_b, const std::string& parameter_b_doc, string parameter_b_value, - std::string_view description) { + const std::string_view description) { auto doc = fmt::format( R"doc( {7} Args: - grad_tensor (ttnn.Tensor): the input tensor. + grad_tensor (ttnn.Tensor): the input gradient tensor. input_tensor_a (ttnn.Tensor): the input tensor. - input_tensor_b (float): the input scalar. - + {2} (float): {3}. Keyword args: - round_mode (round_mode, optional): Round mode for the operation. Defaults to `None`. + {4} (string, optional): {5} , Defaults to {6}. memory_config (ttnn.MemoryConfig, optional): Memory configuration for the operation. Defaults to `None`. Returns: @@ -585,13 +588,13 @@ void bind_unary_backward_string_default( const std::string& parameter_name_a, const std::string& parameter_a_doc, string parameter_a_value, - std::string_view description) { + const std::string_view description) { auto doc = fmt::format( R"doc( {5} Args: - grad_tensor (ttnn.Tensor): the input tensor. + grad_tensor (ttnn.Tensor): the input gradient tensor. input_tensor_a (ttnn.Tensor): the input tensor. Keyword args: @@ -639,15 +642,15 @@ void bind_unary_backward_unary_optional_float( const unary_backward_operation_t& operation, const std::string& parameter_name, const std::string& parameter_doc, - std::string_view description) { + const std::string_view description) { auto doc = fmt::format( R"doc( {4} Args: - grad_tensor (ttnn.Tensor): the input grad tensor. + grad_tensor (ttnn.Tensor): the input gradient tensor. input_tensor_a (ttnn.Tensor): the input tensor. - input_tensor_b (ttnn.Tensor or Number): the input tensor. + {2} (ttnn.Tensor or Number): {3}. Keyword args: memory_config (ttnn.MemoryConfig, optional): Memory configuration for the operation. Defaults to `None`. 
@@ -699,15 +702,15 @@ void bind_unary_backward_shape( const unary_backward_operation_t& operation, const std::string& parameter_name_a, const std::string& parameter_a_doc, - std::string_view description) { + const std::string_view description) { auto doc = fmt::format( R"doc( {4} Args: - grad_tensor (ttnn.Tensor): the input tensor. + grad_tensor (ttnn.Tensor): the input gradient tensor. input_tensor (ttnn.Tensor): the input tensor. - {2} (string): {3} of tensor. + {2} (List[int]): {3}. Keyword args: memory_config (ttnn.MemoryConfig, optional): Memory configuration for the operation. Defaults to `None`. @@ -748,13 +751,13 @@ void bind_unary_backward_shape( template void bind_unary_backward_optional( - py::module& module, const unary_backward_operation_t& operation, std::string_view description, std::string_view supported_dtype = "") { + py::module& module, const unary_backward_operation_t& operation, const std::string_view description) { auto doc = fmt::format( R"doc( {2} Args: - grad_tensor (ComplexTensor or ttnn.Tensor): the input tensor. + grad_tensor (ComplexTensor or ttnn.Tensor): the input gradient tensor. input_tensor_a (ComplexTensor or ttnn.Tensor): the input tensor. Keyword args: @@ -765,7 +768,58 @@ void bind_unary_backward_optional( Returns: List of ttnn.Tensor: the output tensor. 
- {3} + Example: + + >>> grad_tensor = ttnn.to_device(ttnn.from_torch(torch.tensor((1, 2), dtype=torch.bfloat16)), device=device) + >>> tensor1 = ttnn.to_device(ttnn.from_torch(torch.tensor((1, 2), dtype=torch.bfloat16)), device=device) + >>> output = {1}(grad_tensor, tensor) + )doc", + operation.base_name(), + operation.python_fully_qualified_name(), + description); + + bind_registered_operation( + module, + operation, + doc, + ttnn::pybind_overload_t{ + [](const unary_backward_operation_t& self, + const ttnn::Tensor& grad_tensor, + const ttnn::Tensor& input_tensor, + const std::optional& memory_config, + const std::optional& input_grad, + const uint8_t& queue_id) -> std::vector> { + return self(queue_id, grad_tensor, input_tensor, memory_config, input_grad); + }, + py::arg("grad_tensor"), + py::arg("input_tensor"), + py::kw_only(), + py::arg("memory_config") = std::nullopt, + py::arg("input_grad") = std::nullopt, + py::arg("queue_id") = ttnn::DefaultQueueId}); +} + +template +void bind_unary_backward_neg( + py::module& module, const unary_backward_operation_t& operation, const std::string_view description, const std::string_view supported_dtype = "") { + auto doc = fmt::format( + R"doc( + {2} + + Args: + grad_tensor (ComplexTensor or ttnn.Tensor): the input gradient tensor. + input_tensor_a (ComplexTensor or ttnn.Tensor): the input tensor. + + Keyword args: + memory_config (ttnn.MemoryConfig, optional): Memory configuration for the operation. Defaults to `None`. + output_tensor (ttnn.Tensor, optional): Preallocated output tensor. Defaults to `None`. + queue_id (int, optional): command queue id. Defaults to `0`. + + Returns: + List of ttnn.Tensor: the output tensor. + + Note: + {3} Example: @@ -806,7 +860,7 @@ void bind_unary_backward_prod_bw(py::module& module, const unary_backward_operat Performs backward operations for prod on input along `all_dimensions` or a particular `dim`. Args: - grad_tensor (ttnn.Tensor): the input tensor. 
+ grad_tensor (ttnn.Tensor): the input gradient tensor. input_tensor_a (ttnn.Tensor): the input tensor. Keyword args: @@ -849,17 +903,18 @@ void bind_unary_backward_prod_bw(py::module& module, const unary_backward_operat template -void bind_unary_backward_opt(py::module& module, const unary_backward_operation_t& operation, std::string_view description) { +void bind_unary_backward_opt(py::module& module, const unary_backward_operation_t& operation, const std::string_view description) { auto doc = fmt::format( R"doc( {2} Args: - * :attr:`grad_tensor` - * :attr:`input_tensor` + grad_tensor (ttnn.Tensor): the input gradient tensor. + input_tensor_a (ttnn.Tensor): the input tensor. + Keyword args: - * :attr:`memory_config` (Optional[ttnn.MemoryConfig]): memory config for the output tensor + memory_config (ttnn.MemoryConfig, optional): Memory configuration for the operation. Defaults to `None`. Example: @@ -901,7 +956,7 @@ void bind_unary_backward( {2} Args: - grad_tensor (ttnn.Tensor): the input tensor. + grad_tensor (ttnn.Tensor): the input gradient tensor. input_tensor_a (ttnn.Tensor): the input tensor. Keyword args: @@ -946,21 +1001,21 @@ void bind_unary_backward_gelu( const std::string& parameter_name_a, const std::string& parameter_a_doc, string parameter_a_value, - std::string_view description) { + const std::string_view description) { auto doc = fmt::format( - R"doc({0}(grad_tensor: ttnn.Tensor, input_tensor: ttnn.Tensor, {2}: string, *, memory_config: ttnn.MemoryConfig) -> std::vector - + R"doc( {5} Args: - * :attr:`grad_tensor` - * :attr:`input_tensor` + grad_tensor (ttnn.Tensor): the input gradient tensor. + input_tensor_a (ttnn.Tensor): the input tensor. 
Keyword args: - * :attr:`{2}` (string): {3} , Default value = {4} - * :attr:`memory_config` (Optional[ttnn.MemoryConfig]): memory config for the output tensor - * :attr:`output_tensor` (Optional[ttnn.Tensor]): preallocated output tensor - * :attr:`queue_id` (Optional[uint8]): command queue id + {2} (string): {3} , Defaults to {4}. + memory_config (ttnn.MemoryConfig, optional): Memory configuration for the operation. Defaults to `None`. + output_tensor (ttnn.Tensor, optional): preallocated output tensor. Defaults to `None`. + queue_id (uint8, optional): command queue id. Defaults to `0`. + Example: @@ -1074,14 +1129,16 @@ void py_module(py::module& module) { detail::bind_unary_backward_two_float( module, ttnn::threshold_bw, + R"doc(Performs backward operations for threshold on :attr:`input_tensor`, :attr:`threshold`, :attr:`value` with given :attr:`grad_tensor`.)doc", R"doc(Supported dtypes, layouts, and ranks: - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations for threshold on :attr:`input_tensor`, :attr:`threshold`, :attr:`value` with given :attr:`grad_tensor`.)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16 | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward_two_float_with_default( module, @@ -1109,8 +1166,8 @@ void py_module(py::module& module) { module, ttnn::repeat_bw, "shape", - "Shape", - R"doc(Performs backward operations for repeat on :attr:`input_tensor_a` or 
:attr:`input_tensor`, with given :attr:`grad_tensor` using given :attr:`shape`.)doc"); + "Shape of tensor", + R"doc(Performs backward operations for repeat on :attr:`input_tensor`, with given :attr:`grad_tensor` using given :attr:`shape`.)doc"); detail::bind_unary_backward_gelu( module, @@ -1181,146 +1238,170 @@ void py_module(py::module& module) { detail::bind_unary_backward_op( module, ttnn::acos_bw, + R"doc(Performs backward operations for inverse cosine (acos) on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc", R"doc(Supported dtypes, layouts, and ranks: - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations for inverse hyperbolic cosine (acosh) on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16 | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward_op( module, ttnn::atan_bw, + R"doc(Performs backward operations for atan on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc", R"doc(Supported dtypes, layouts, and ranks: - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations for atan on :attr:`input_tensor` 
with given :attr:`grad_tensor`.)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16 | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward_op( module, ttnn::rad2deg_bw, + R"doc(Performs backward operations for radian to degree conversion (rad2deg) on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc", R"doc(Supported dtypes, layouts, and ranks: - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations for radian to degree conversion (rad2deg) on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward_op( module, ttnn::frac_bw, + R"doc(Performs backward operations for frac on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc", R"doc(Supported dtypes, layouts, and ranks: - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE, ROW_MAJOR | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - 
R"doc(Performs backward operations for frac on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16 | TILE, ROW_MAJOR | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward_op( module, ttnn::trunc_bw, + R"doc(Performs backward operations for truncation on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc", R"doc(Supported dtypes, layouts, and ranks: - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE, ROW_MAJOR | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations for truncation on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16 | TILE, ROW_MAJOR | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward_op( module, ttnn::log_sigmoid_bw, + R"doc(Performs backward operations for log sigmoid on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc", R"doc(Supported dtypes, layouts, and ranks: - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - 
+----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations for log sigmoid on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16 | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward_op( module, ttnn::fill_zero_bw, + R"doc(Performs backward operations of fill zero on :attr:`input_tensor` with given :attr:`grad_tensor`. Returns an tensor of zeros like :attr:`grad_tensor`.)doc", R"doc(Supported dtypes, layouts, and ranks: - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE, ROW_MAJOR | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations of fill zero on :attr:`input_tensor` with given :attr:`grad_tensor`. 
Returns an tensor of zeros like :attr:`grad_tensor`.)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16 | TILE, ROW_MAJOR | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward_op( module, ttnn::i0_bw, + R"doc(Performs backward operations for i0 on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc", R"doc(Supported dtypes, layouts, and ranks: - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations for i0 on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward_op( module, ttnn::tan_bw, + R"doc(Performs backward operations for tan on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc", R"doc(Supported dtypes, layouts, and ranks: - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations for tan on 
:attr:`input_tensor` with given :attr:`grad_tensor`.)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward_op( module, ttnn::sigmoid_bw, + R"doc(Performs backward operations for sigmoid on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc", R"doc(Supported dtypes, layouts, and ranks: - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations for sigmoid on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward_rsqrt( module, ttnn::rsqrt_bw, + R"doc(Performs backward operations for rsqrt on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc", R"doc(Supported dtypes, layouts, and ranks: - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward 
operations for rsqrt on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ - detail::bind_unary_backward_optional( + )doc"); + + detail::bind_unary_backward_neg( module, ttnn::neg_bw, + R"doc(Performs backward operations for neg on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc", R"doc(Supported dtypes, layouts, and ranks: - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations for neg on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward( module, @@ -1355,235 +1436,273 @@ void py_module(py::module& module) { detail::bind_unary_backward_op( module, ttnn::relu6_bw, + R"doc(Performs backward operations for relu6 on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", R"doc(Supported dtypes, layouts, and ranks: - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | 
BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations for relu6 on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16 | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward_op_overload_abs( module, ttnn::abs_bw, R"doc(Performs backward operations for abs on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); - detail::bind_unary_backward_optional( + detail::bind_unary_backward_neg( module, ttnn::silu_bw, + R"doc(Performs backward operations for silu on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", R"doc(Supported dtypes, layouts, and ranks: - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations for silu on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16 | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward_op( module, ttnn::selu_bw, + R"doc(Performs backward operations for selu on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", R"doc(Supported dtypes, layouts, and ranks: - 
+----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations for selu on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward_op( module, ttnn::square_bw, + R"doc(Performs backward operations for square on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", R"doc(Supported dtypes, layouts, and ranks: - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations for square on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward_op( module, ttnn::hardswish_bw, + R"doc(Performs backward operations for hardswish on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", R"doc(Supported dtypes, 
layouts, and ranks: - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations for hardswish on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16 | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward_op( module, ttnn::tanhshrink_bw, + R"doc(Performs backward operations for tanhshrink on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", R"doc(Supported dtypes, layouts, and ranks: - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations for tanhshrink on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward_op( module, ttnn::atanh_bw, + R"doc(Performs backward operations for atanh on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", R"doc(Supported 
dtypes, layouts, and ranks: - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations for atanh on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16 | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward_op( module, ttnn::asin_bw, + R"doc(Performs backward operations for asin on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", R"doc(Supported dtypes, layouts, and ranks: - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations for asin on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16 | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward_op( module, ttnn::asinh_bw, + R"doc(Performs backward operations for asinh on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", R"doc(Supported dtypes, layouts, and ranks: - 
+----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations for asinh on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward_op( module, ttnn::sin_bw, + R"doc(Performs backward operations for sin on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", R"doc(Supported dtypes, layouts, and ranks: - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations for sin on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward_op( module, ttnn::sinh_bw, + R"doc(Performs backward operations for sinh on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", R"doc(Supported dtypes, layouts, and ranks: 
- +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations for sinh on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16 | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward_op( module, ttnn::log10_bw, + R"doc(Performs backward operations for log10 on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", R"doc(Supported dtypes, layouts, and ranks: - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations for log10 on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16 | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward_op( module, ttnn::log1p_bw, + R"doc(Performs backward operations for log1p on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", R"doc(Supported dtypes, layouts, and ranks: - 
+----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations for log1p on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16 | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward_op( module, ttnn::erfc_bw, + R"doc(Performs backward operations for erfc on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", R"doc(Supported dtypes, layouts, and ranks: - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations for erfc on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16 | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward_op( module, ttnn::ceil_bw, + R"doc(Performs backward operations for ceil on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", R"doc(Supported dtypes, layouts, and ranks: - 
+----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE, ROW_MAJOR | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations for ceil on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16 | TILE, ROW_MAJOR | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward_op( module, ttnn::softsign_bw, + R"doc(Performs backward operations for softsign on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", R"doc(Supported dtypes, layouts, and ranks: - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations for softsign on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16 | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward_op( module, ttnn::cosh_bw, + R"doc(Performs backward operations for cosh on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc", R"doc(Supported dtypes, layouts, and ranks: - 
+----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations for cosh on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16 | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward_op( module, ttnn::log2_bw, + R"doc(Performs backward operations for log2 on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc", R"doc(Supported dtypes, layouts, and ranks: - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations for log2 on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16 | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward_op( module, ttnn::sign_bw, + R"doc(Performs backward operations for sign on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc", R"doc(Supported dtypes, layouts, and ranks: - 
+----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE, ROW_MAJOR | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations for sign on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16 | TILE, ROW_MAJOR | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward_float( module, @@ -1593,26 +1712,30 @@ void py_module(py::module& module) { detail::bind_unary_backward_op( module, ttnn::exp2_bw, + R"doc(Performs backward operations for exp2 on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", R"doc(Supported dtypes, layouts, and ranks: - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations for exp2 on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward_op( module, ttnn::expm1_bw, + R"doc(Performs backward operations for 
expm1 on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", R"doc(Supported dtypes, layouts, and ranks: - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations for exp2 on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward_op_reciprocal( module, @@ -1622,55 +1745,63 @@ void py_module(py::module& module) { detail::bind_unary_backward_op( module, ttnn::digamma_bw, + R"doc(Performs backward operations for digamma on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", R"doc(Supported dtypes, layouts, and ranks: - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations for digamma on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16 | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + 
)doc"); detail::bind_unary_backward_op( module, ttnn::erfinv_bw, + R"doc(Performs backward operations for erfinv on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", R"doc(Supported dtypes, layouts, and ranks: - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations for erfinv on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16 | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward_op( module, ttnn::erf_bw, + R"doc(Performs backward operations for erf on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", R"doc(Supported dtypes, layouts, and ranks: - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations for erf on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16 | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); 
detail::bind_unary_backward_op( module, ttnn::deg2rad_bw, + R"doc(Performs backward operations for deg2rad on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", R"doc(Supported dtypes, layouts, and ranks: - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+)doc", - R"doc(Performs backward operations for deg2rad on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward_float( module, ttnn::polygamma_bw, - R"doc(Performs backward operations for polygamma on :attr:`input_tensor` or attr:`input_tensor_a`, attr:`scalar` with given :attr:`grad_tensor`.)doc"); + R"doc(Performs backward operations for polygamma on :attr:`input_tensor` or :attr:`input_tensor_a`, :attr:`scalar` with given :attr:`grad_tensor`.)doc"); } } // namespace unary_backward