
Commit

#10071: Update CPP files
mouliraj-mcw committed Jul 11, 2024
1 parent 5aab738 commit fbecddf
Showing 4 changed files with 8 additions and 98 deletions.
41 changes: 0 additions & 41 deletions tt_eager/tt_dnn/op_library/backward/backward_ops.cpp
@@ -280,26 +280,6 @@ std::vector<Tensor> ne_bw(const Tensor& grad, const MemoryConfig& output_mem_con
    return operation::decorate_as_composite(__func__, _ne_bw)(grad, output_mem_config);
}

std::vector<Tensor> _rsqrt_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
    std::vector<Tensor> grad_tensor;
    Tensor rsqrt_result = power(rsqrt(input, true, output_mem_config), 3, output_mem_config);
    Tensor result = mul_unary(ttnn::multiply(grad, rsqrt_result, std::nullopt, output_mem_config), -0.5, output_mem_config);
    float t_inf = std::numeric_limits<float>::infinity();
    result = where(eqz(input, output_mem_config), t_inf, result, output_mem_config);
    float t_nan = std::nanf("");
    result = where(ltz(input, output_mem_config), t_nan, result, output_mem_config);
    result = where(
        ttnn::logical_and(eqz(input, output_mem_config), eqz(grad, output_mem_config), std::nullopt, output_mem_config),
        t_nan,
        result,
        output_mem_config);
    grad_tensor.emplace_back(result);
    return grad_tensor;
}
std::vector<Tensor> rsqrt_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
    return operation::decorate_as_composite(__func__, _rsqrt_bw)(grad, input, output_mem_config);
}
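For reference (a note added for readability, not part of the commit): the removed ``_rsqrt_bw`` implements the analytic gradient of the reciprocal square root,

\[ \frac{\partial}{\partial x}\, x^{-1/2} = -\tfrac{1}{2}\, x^{-3/2} \quad\Longrightarrow\quad \nabla_x = -\tfrac{1}{2}\,\mathrm{grad}\cdot \mathrm{rsqrt}(x)^{3}, \]

with the ``where`` chain patching the edge cases: \(+\infty\) at \(x = 0\), NaN for \(x < 0\), and NaN when both the input and the incoming gradient are zero.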

// bw(expm1) = grad * (expm1(input) + 1) = grad * exp(input)
std::vector<Tensor> _expm1_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
    std::vector<Tensor> grad_tensor;
@@ -980,27 +960,6 @@ std::vector<Tensor> reciprocal_bw(const Tensor& grad, const Tensor& input, const
    return operation::decorate_as_composite(__func__, _reciprocal_bw)(grad, input, output_mem_config);
}

std::vector<Tensor> _rpow_bw(
    const Tensor& grad, const Tensor& input, float exponent, const MemoryConfig& output_mem_config) {
    std::vector<Tensor> grad_tensor;
    float t_nan = std::nanf("");
    Tensor grad_result = zeros_like(input, output_mem_config);
    if (exponent != 0.0) {
        grad_result =
            ttnn::multiply(grad,
                ttnn::multiply(pow(input, exponent - 1, output_mem_config), exponent, std::nullopt, output_mem_config),
                std::nullopt,
                output_mem_config);
        grad_result = where(ttnn::ltz(input, output_mem_config), t_nan, grad_result, output_mem_config);
    }
    grad_tensor.emplace_back(grad_result);
    return grad_tensor;
}
std::vector<Tensor> rpow_bw(
    const Tensor& grad, const Tensor& input, float exponent, const MemoryConfig& output_mem_config) {
    return operation::decorate_as_composite(__func__, _rpow_bw)(grad, input, exponent, output_mem_config);
}
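Likewise for the removed ``_rpow_bw``: it applies the power rule to the tensor input,

\[ \frac{\partial}{\partial x}\, x^{e} = e\, x^{e-1} \quad\Longrightarrow\quad \nabla_x = \mathrm{grad}\cdot e\, x^{e-1}, \]

returning an all-zero gradient when ``exponent`` is zero and NaN for negative inputs, where a real-valued non-integer power is undefined.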

// Autoformat support
Tensor change_layout_to_tile(const Tensor& temp, const MemoryConfig& output_mem_config) {
    auto formatted_input_tensor = temp;
16 changes: 0 additions & 16 deletions tt_eager/tt_dnn/op_library/backward/backward_ops.hpp
@@ -101,11 +101,6 @@ std::vector<std::optional<Tensor>> tanh_bw(
std::vector<Tensor> fill_bw(
    const Tensor& grad, const MemoryConfig& output_mem_config = operation::DEFAULT_OUTPUT_MEMORY_CONFIG);

std::vector<Tensor> unary_sub_bw(
    const Tensor& grad,
    const Tensor& input,
    const MemoryConfig& output_mem_config = operation::DEFAULT_OUTPUT_MEMORY_CONFIG);

std::vector<Tensor> binary_le_bw(
    const Tensor& grad,
    const Tensor& input,
@@ -279,17 +274,6 @@ std::vector<Tensor> reciprocal_bw(
    const Tensor& input,
    const MemoryConfig& output_mem_config = operation::DEFAULT_OUTPUT_MEMORY_CONFIG);

std::vector<Tensor> rpow_bw(
    const Tensor& grad,
    const Tensor& input,
    float exponent,
    const MemoryConfig& output_mem_config = operation::DEFAULT_OUTPUT_MEMORY_CONFIG);

std::vector<Tensor> square_bw(
    const Tensor& grad,
    const Tensor& input,
    const MemoryConfig& output_mem_config = operation::DEFAULT_OUTPUT_MEMORY_CONFIG);

std::vector<Tensor> prod_bw(
    const Tensor& grad,
    const Tensor& input,
33 changes: 0 additions & 33 deletions tt_eager/tt_lib/csrc/tt_lib_bindings_tensor_backward_ops.cpp
@@ -791,39 +791,6 @@ namespace tt::tt_metal::detail{
            "output_mem_config", "Layout of tensor in TT Accelerator device memory banks", "MemoryConfig", "Default is interleaved in DRAM", "No"
    )doc");

m_tensor.def("rpow_bw", &tt::tt_metal::rpow_bw,
py::arg("grad").noconvert(), py::arg("input").noconvert(), py::arg("exponent").noconvert(), py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
Performs backward operations for rpow for the ``input`` and ``exponent`` with given ``grad``
Input tensors must have BFLOAT16 data type.
Output tensor will have BFLOAT16 data type.
.. csv-table::
:header: "Argument", "Description", "Data type", "Valid range", "Required"
"grad", "Gradient tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
"input", "Tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
"exponent", "exponent", "float", ">0.0", "Yes"
"output_mem_config", "Layout of tensor in TT Accelerator device memory banks", "MemoryConfig", "Default is interleaved in DRAM", "No"
)doc");

m_tensor.def("square_bw", &tt::tt_metal::square_bw,
py::arg("grad").noconvert(), py::arg("input").noconvert(), py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
Performs backward square operations on ``input`` tensors with given ``grad``.
Input tensors must have BFLOAT16 data type.
Output tensors will have BFLOAT16 data type.
.. csv-table::
:header: "Argument", "Description", "Data type", "Valid range", "Required"
"grad", "Gradient tensor", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
"input", "Tensor square_bw is applied to", "Tensor", "Tensor of shape [W, Z, Y, X]", "Yes"
"output_mem_config", "Layout of tensor in TT Accelerator device memory banks", "MemoryConfig", "Default is interleaved in DRAM", "No"
)doc");

m_tensor.def("prod_bw", &tt::tt_metal::prod_bw,
py::arg("grad").noconvert(), py::arg("input").noconvert(), py::arg("all_dimensions") , py::arg("dim") , py::arg("output_mem_config").noconvert() = operation::DEFAULT_OUTPUT_MEMORY_CONFIG, R"doc(
Performs backward operations for prod on ``input_a`` along ``all_dimensions`` or a particular ``dim``.
@@ -358,23 +358,23 @@ std::vector<Tensor> _rpow_bw(

 std::vector<Tensor> _floor_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
     std::vector<Tensor> grad_tensor;
-    Tensor t_zero = tt::tt_metal::zeros_like(grad, output_mem_config);
+    Tensor t_zero = ttnn::operations::creation::zeros_like(grad);
     grad_tensor.emplace_back(t_zero);
     return grad_tensor;
 }

 std::vector<Tensor> _round_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
     std::vector<Tensor> grad_tensor;
-    Tensor t_zero = tt::tt_metal::zeros_like(grad, output_mem_config);
+    Tensor t_zero = ttnn::operations::creation::zeros_like(grad);
     grad_tensor.emplace_back(t_zero);
     return grad_tensor;
 }
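Both ``_floor_bw`` and ``_round_bw`` return a zero tensor, which follows from floor and round being piecewise constant: their derivative is zero everywhere it is defined,

\[ \frac{\partial}{\partial x}\, \lfloor x \rfloor = 0 \quad \text{for } x \notin \mathbb{Z}, \]

with zero used by convention at the integer jump points.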

 std::vector<Tensor> _log_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
     std::vector<Tensor> grad_tensor;
     Tensor grad_a = ttnn::multiply(grad, ttnn::reciprocal(input, output_mem_config), std::nullopt, output_mem_config);
-    Tensor t_inf = tt::tt_metal::full_like(input, std::numeric_limits<float>::infinity(), output_mem_config);
-    Tensor t_nan = tt::tt_metal::full_like(input, std::nanf(""), output_mem_config);
+    Tensor t_inf = ttnn::operations::creation::full_like(input, std::numeric_limits<float>::infinity());
+    Tensor t_nan = ttnn::operations::creation::full_like(input, std::nanf(""));
     grad_tensor.emplace_back(where(
         ttnn::eqz(input, output_mem_config),
         where(
@@ -389,9 +389,9 @@ std::vector<Tensor> _log_bw(const Tensor& grad, const Tensor& input, const Memor

 std::vector<Tensor> _relu6_bw(const Tensor& grad, const Tensor& input, const MemoryConfig& output_mem_config) {
     std::vector<Tensor> grad_tensor;
-    Tensor zero_tensor = tt::tt_metal::zeros_like(input, output_mem_config);
-    Tensor one_tensor = tt::tt_metal::ones_like(input, output_mem_config);
-    Tensor six_tensor = tt::tt_metal::full_like(input, 6, output_mem_config);
+    Tensor zero_tensor = ttnn::operations::creation::zeros_like(input);
+    Tensor one_tensor = ttnn::operations::creation::ones_like(input);
+    Tensor six_tensor = ttnn::operations::creation::full_like(input, 6);
     Tensor grad_result =
         where(ttnn::le(input, zero_tensor, std::nullopt, output_mem_config), zero_tensor, six_tensor, output_mem_config);
     grad_result = where(
@@ -423,7 +423,7 @@ std::vector<Tensor> _silu_bw(const Tensor& grad, const Tensor& input, const Memo
     std::vector<Tensor> grad_tensor;
     Tensor grad_sigmoid = ttnn::multiply(grad, ttnn::sigmoid(input, output_mem_config), std::nullopt, output_mem_config);
     Tensor add_sub = ttnn::add(
-        ttnn::multiply(ttnn::subtract(tt::tt_metal::full_like(input, 1.0f) , ttnn::sigmoid(input, output_mem_config), std::nullopt, output_mem_config),
+        ttnn::multiply(ttnn::subtract(ttnn::operations::creation::full_like(input, 1.0f) , ttnn::sigmoid(input, output_mem_config), std::nullopt, output_mem_config),
         input,
         std::nullopt,
         output_mem_config),
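For context on the ``_silu_bw`` hunk above (a reviewer note; the function body is truncated here): the SiLU derivative expands as

\[ \frac{d}{dx}\, x\,\sigma(x) = \sigma(x)\bigl(1 + x\,(1 - \sigma(x))\bigr), \]

which is the quantity the ``grad_sigmoid`` and ``add_sub`` terms appear to assemble.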
