From 75d99cbbd51965b323579da998474e8568453d5b Mon Sep 17 00:00:00 2001
From: mouliraj-mcw
Date: Thu, 11 Jul 2024 14:59:38 +0000
Subject: [PATCH] #10071: Update CPP files

---
 docs/source/ttnn/ttnn/dependencies/tt_lib.rst |  4 --
 .../op_library/backward/backward_ops.cpp      | 41 -------------------
 .../op_library/backward/backward_ops.hpp      | 10 -----
 .../tt_lib_bindings_tensor_backward_ops.cpp   | 17 --------
 .../device/unary_backward_op.cpp              | 16 ++++----
 5 files changed, 8 insertions(+), 80 deletions(-)

diff --git a/docs/source/ttnn/ttnn/dependencies/tt_lib.rst b/docs/source/ttnn/ttnn/dependencies/tt_lib.rst
index 8e59eb3a369..8f71224774a 100644
--- a/docs/source/ttnn/ttnn/dependencies/tt_lib.rst
+++ b/docs/source/ttnn/ttnn/dependencies/tt_lib.rst
@@ -814,8 +814,6 @@ Backward Operations
 
 .. autofunction:: tt_lib.tensor.fill_bw
 
-.. autofunction:: tt_lib.tensor.unary_sub_bw
-
 .. autofunction:: tt_lib.tensor.complex_abs_bw
 
 .. autofunction:: tt_lib.tensor.lt_bw
@@ -878,8 +876,6 @@ Backward Operations
 
 .. autofunction:: tt_lib.tensor.reciprocal_bw
 
-.. autofunction:: tt_lib.tensor.rpow_bw
-
 .. autofunction:: tt_lib.tensor.square_bw
 
 .. autofunction:: tt_lib.tensor.tanhshrink_bw
diff --git a/tt_eager/tt_dnn/op_library/backward/backward_ops.cpp b/tt_eager/tt_dnn/op_library/backward/backward_ops.cpp
index 8f17edc8f92..7f878cc5bf8 100644
--- a/tt_eager/tt_dnn/op_library/backward/backward_ops.cpp
+++ b/tt_eager/tt_dnn/op_library/backward/backward_ops.cpp
@@ -280,26 +280,6 @@ std::vector<Tensor> ne_bw(const Tensor& grad, const MemoryConfig& output_mem_con
     return operation::decorate_as_composite(__func__, _ne_bw)(grad, output_mem_config);
 }
 
-std::vector<Tensor> _rsqrt_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
-    std::vector<Tensor> grad_tensor;
-    Tensor rsqrt_result = power(rsqrt(input, true, output_mem_config), 3, output_mem_config);
-    Tensor result = mul_unary(ttnn::multiply(grad, rsqrt_result, std::nullopt, output_mem_config), -0.5, output_mem_config);
-    float t_inf = std::numeric_limits<float>::infinity();
-    result = where(eqz(input, output_mem_config), t_inf, result, output_mem_config);
-    float t_nan = std::nanf("");
-    result = where(ltz(input, output_mem_config), t_nan, result, output_mem_config);
-    result = where(
-        ttnn::logical_and(eqz(input, output_mem_config), eqz(grad, output_mem_config), std::nullopt, output_mem_config),
-        t_nan,
-        result,
-        output_mem_config);
-    grad_tensor.emplace_back(result);
-    return grad_tensor;
-}
-std::vector<Tensor> rsqrt_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
-    return operation::decorate_as_composite(__func__, _rsqrt_bw)(grad, input, output_mem_config);
-}
-
 // bw(expm1) = grad * expm1(input) + 1
 std::vector<Tensor> _expm1_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
     std::vector<Tensor> grad_tensor;
@@ -980,27 +960,6 @@ std::vector<Tensor> reciprocal_bw(const Tensor& grad, const Tensor& input, const
     return operation::decorate_as_composite(__func__, _reciprocal_bw)(grad, input, output_mem_config);
 }
 
-std::vector<Tensor> _rpow_bw(
-    const Tensor& grad, const Tensor& input, float exponent, const MemoryConfig& output_mem_config) {
-    std::vector<Tensor> grad_tensor;
-    float t_nan = std::nanf("");
-    Tensor grad_result = zeros_like(input, output_mem_config);
-    if (exponent != 0.0) {
-        grad_result =
-            ttnn::multiply(grad,
-                ttnn::multiply(pow(input, exponent - 1, output_mem_config), exponent, std::nullopt, output_mem_config),
-                std::nullopt,
-                output_mem_config);
-        grad_result = where(ttnn::ltz(input, output_mem_config), t_nan, grad_result, output_mem_config);
-    }
-    grad_tensor.emplace_back(grad_result);
-    return grad_tensor;
-}
-std::vector<Tensor> rpow_bw(
-    const Tensor& grad, const Tensor& input, float exponent, const MemoryConfig& output_mem_config) {
-    return operation::decorate_as_composite(__func__, _rpow_bw)(grad, input, exponent, output_mem_config);
-}
-
 // Autoformat support
 Tensor change_layout_to_tile(const Tensor& temp, const MemoryConfig& output_mem_config) {
     auto formatted_input_tensor = temp;
diff --git a/tt_eager/tt_dnn/op_library/backward/backward_ops.hpp b/tt_eager/tt_dnn/op_library/backward/backward_ops.hpp
index 1e4b649a92c..905f3ad2adf 100644
--- a/tt_eager/tt_dnn/op_library/backward/backward_ops.hpp
+++ b/tt_eager/tt_dnn/op_library/backward/backward_ops.hpp
@@ -101,10 +101,6 @@ std::vector<std::optional<Tensor>> tanh_bw(
 std::vector<Tensor> fill_bw(
     const Tensor& grad, const MemoryConfig& output_mem_config = operation::DEFAULT_OUTPUT_MEMORY_CONFIG);
 
-std::vector<Tensor> unary_sub_bw(
-    const Tensor& grad,
-    const Tensor& input,
-    const MemoryConfig& output_mem_config = operation::DEFAULT_OUTPUT_MEMORY_CONFIG);
 
 std::vector<Tensor> binary_le_bw(
     const Tensor& grad,
@@ -279,12 +275,6 @@ std::vector<Tensor> reciprocal_bw(
     const Tensor& input,
     const MemoryConfig& output_mem_config = operation::DEFAULT_OUTPUT_MEMORY_CONFIG);
 
-std::vector<Tensor> rpow_bw(
-    const Tensor& grad,
-    const Tensor& input,
-    float exponent,
-    const MemoryConfig& output_mem_config = operation::DEFAULT_OUTPUT_MEMORY_CONFIG);
-
 std::vector<Tensor> square_bw(
     const Tensor& grad,
     const Tensor& input,
diff --git a/tt_eager/tt_lib/csrc/tt_lib_bindings_tensor_backward_ops.cpp b/tt_eager/tt_lib/csrc/tt_lib_bindings_tensor_backward_ops.cpp
index 5c7864f1a1b..64ab793d044 100644
--- a/tt_eager/tt_lib/csrc/tt_lib_bindings_tensor_backward_ops.cpp
+++ b/tt_eager/tt_lib/csrc/tt_lib_bindings_tensor_backward_ops.cpp
@@ -791,23 +791,6 @@ namespace tt::tt_metal::detail{
             "output_mem_config", "Layout of tensor in TT Accelerator device memory banks", "MemoryConfig", "Default is interleaved in DRAM", "No"
         )doc");
 
-    m_tensor.def("rpow_bw", &tt::tt_metal::rpow_bw,
-            py::arg("grad").noconvert(), py::arg("input").noconvert(), py::arg("exponent").noconvert(), py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
-        Performs backward operations for rpow for the ``input`` and ``exponent`` with given ``grad``
-
-        Input tensors must have BFLOAT16 data type.
-
-        Output tensor will have BFLOAT16 data type.
-
-        .. csv-table::
-            :header: "Argument", "Description", "Data type", "Valid range", "Required"
-
-            "grad", "Gradient tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
-            "input", "Tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
-            "exponent", "exponent", "float", ">0.0", "Yes"
-            "output_mem_config", "Layout of tensor in TT Accelerator device memory banks", "MemoryConfig", "Default is interleaved in DRAM", "No"
-        )doc");
-
     m_tensor.def("square_bw", &tt::tt_metal::square_bw,
             py::arg("grad").noconvert(), py::arg("input").noconvert(), py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
         Performs backward square operations on ``input`` tensors with given ``grad``.
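Note on the removals above: both deleted composites implement closed-form gradients, so they are easy to sanity-check against scalar math. _rsqrt_bw computes grad * rsqrt(x)^3 * -0.5, i.e. d/dx x^(-1/2) = -x^(-3/2)/2, with inf/nan patches at x <= 0; _rpow_bw computes grad * exponent * x^(exponent - 1), with nan for x < 0. A minimal scalar sketch of that logic (plain C++; the helper names are hypothetical, not tt-metal API):

    // Scalar reference for the gradients the removed composites computed.
    // Hypothetical helpers, for illustration only.
    #include <cmath>
    #include <cstdio>
    #include <limits>

    // Mirrors _rsqrt_bw: grad * rsqrt(x)^3 * -0.5; inf at x == 0
    // (nan if grad is also 0), nan for x < 0.
    float rsqrt_grad(float grad, float x) {
        if (x == 0.0f) return grad == 0.0f ? std::nanf("") : std::numeric_limits<float>::infinity();
        if (x < 0.0f) return std::nanf("");
        float r = 1.0f / std::sqrt(x);
        return -0.5f * grad * r * r * r;
    }

    // Mirrors _rpow_bw: grad * exponent * x^(exponent - 1); zero when the
    // exponent is 0, nan for x < 0.
    float rpow_grad(float grad, float x, float exponent) {
        if (exponent == 0.0f) return 0.0f;
        float g = grad * exponent * std::pow(x, exponent - 1.0f);
        return x < 0.0f ? std::nanf("") : g;
    }

    int main() {
        std::printf("%f\n", rsqrt_grad(1.0f, 4.0f));       // -0.0625 = -0.5 * 4^(-1.5)
        std::printf("%f\n", rpow_grad(1.0f, 2.0f, 3.0f));  // 12 = 3 * 2^2
    }

A couple of spot checks like the two in main() make it straightforward to confirm that whatever replacement path callers migrate to produces the same values, including the edge cases.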
diff --git a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/device/unary_backward_op.cpp b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/device/unary_backward_op.cpp
index c9a05b8566e..bbcffb294c3 100644
--- a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/device/unary_backward_op.cpp
+++ b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/device/unary_backward_op.cpp
@@ -358,14 +358,14 @@ std::vector<Tensor> _rpow_bw(
 
 std::vector<Tensor> _floor_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
     std::vector<Tensor> grad_tensor;
-    Tensor t_zero = tt::tt_metal::zeros_like(grad, output_mem_config);
+    Tensor t_zero = ttnn::operations::creation::zeros_like(grad);
     grad_tensor.emplace_back(t_zero);
     return grad_tensor;
 }
 
 std::vector<Tensor> _round_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
     std::vector<Tensor> grad_tensor;
-    Tensor t_zero = tt::tt_metal::zeros_like(grad, output_mem_config);
+    Tensor t_zero = ttnn::operations::creation::zeros_like(grad);
     grad_tensor.emplace_back(t_zero);
     return grad_tensor;
 }
@@ -373,8 +373,8 @@ std::vector<Tensor> _round_bw(const Tensor& grad, const Tensor& input, const Mem
 std::vector<Tensor> _log_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
     std::vector<Tensor> grad_tensor;
     Tensor grad_a = ttnn::multiply(grad, ttnn::reciprocal(input, output_mem_config), std::nullopt, output_mem_config);
-    Tensor t_inf = tt::tt_metal::full_like(input, std::numeric_limits<float>::infinity(), output_mem_config);
-    Tensor t_nan = tt::tt_metal::full_like(input, std::nanf(""), output_mem_config);
+    Tensor t_inf = ttnn::operations::creation::full_like(input, std::numeric_limits<float>::infinity());
+    Tensor t_nan = ttnn::operations::creation::full_like(input, std::nanf(""));
     grad_tensor.emplace_back(where(
         ttnn::eqz(input, output_mem_config),
         where(
@@ -389,9 +389,9 @@ std::vector<Tensor> _log_bw(const Tensor& grad, const Tensor& input, const Memor
 
 std::vector<Tensor> _relu6_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
     std::vector<Tensor> grad_tensor;
-    Tensor zero_tensor = tt::tt_metal::zeros_like(input, output_mem_config);
-    Tensor one_tensor = tt::tt_metal::ones_like(input, output_mem_config);
-    Tensor six_tensor = tt::tt_metal::full_like(input, 6, output_mem_config);
+    Tensor zero_tensor = ttnn::operations::creation::zeros_like(input);
+    Tensor one_tensor = ttnn::operations::creation::ones_like(input);
+    Tensor six_tensor = ttnn::operations::creation::full_like(input, 6);
     Tensor grad_result =
         where(ttnn::le(input, zero_tensor, std::nullopt, output_mem_config), zero_tensor, six_tensor, output_mem_config);
     grad_result = where(
@@ -423,7 +423,7 @@ std::vector<Tensor> _silu_bw(const Tensor& grad, const Tensor& input, const Memo
     std::vector<Tensor> grad_tensor;
    Tensor grad_sigmoid = ttnn::multiply(grad, ttnn::sigmoid(input, output_mem_config), std::nullopt, output_mem_config);
     Tensor add_sub = ttnn::add(
-        ttnn::multiply(ttnn::subtract(tt::tt_metal::full_like(input, 1.0f) , ttnn::sigmoid(input, output_mem_config), std::nullopt, output_mem_config),
+        ttnn::multiply(ttnn::subtract(ttnn::operations::creation::full_like(input, 1.0f) , ttnn::sigmoid(input, output_mem_config), std::nullopt, output_mem_config),
             input,
             std::nullopt,
             output_mem_config),
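The ttnn hunks above all follow one mechanical pattern: tensor-creation helpers move from tt::tt_metal::{zeros_like, ones_like, full_like}(x, ..., output_mem_config) to ttnn::operations::creation::{zeros_like, ones_like, full_like}(x, ...), which default the output memory config instead of taking it explicitly; the surrounding gradient math is untouched. For reference, a scalar sketch of the backward formulas visible in these hunks (plain C++; helper names hypothetical, and the x == 0 branch of _log_bw is truncated in the diff, so its handling here is an assumption):

    #include <cmath>
    #include <cstdio>
    #include <limits>

    // _floor_bw / _round_bw: piecewise-constant ops, so the gradient is
    // zero everywhere (the kernels just emit zeros_like(grad)).
    float floor_round_grad(float /*grad*/) { return 0.0f; }

    // _log_bw: d/dx ln(x) = 1/x, so grad_a = grad / x. The t_inf / t_nan
    // tensors suggest x == 0 is special-cased; assumed here: nan when grad
    // is also 0, +/-inf following the sign of grad otherwise.
    float log_grad(float grad, float x) {
        if (x == 0.0f) {
            return grad == 0.0f ? std::nanf("")
                                : std::copysign(std::numeric_limits<float>::infinity(), grad);
        }
        return grad / x;
    }

    // _relu6_bw: relu6(x) = min(max(x, 0), 6), so the gradient passes
    // through only for 0 < x < 6 (standard definition; the where() chain
    // in the diff is cut off above).
    float relu6_grad(float grad, float x) {
        return (x > 0.0f && x < 6.0f) ? grad : 0.0f;
    }

    int main() {
        std::printf("%f\n", log_grad(1.0f, 2.0f));    // 0.5
        std::printf("%f\n", relu6_grad(2.0f, 3.0f));  // 2
        std::printf("%f\n", floor_round_grad(2.0f));  // 0
    }

Because the migration only changes where the constant tensors come from, it should not change numerics, only how the zero/one/six constants pick up their memory configuration.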