diff --git a/.github/workflows/ttnn-run-sweeps.yaml b/.github/workflows/ttnn-run-sweeps.yaml index 3e8d9861344..2ce0a629e44 100644 --- a/.github/workflows/ttnn-run-sweeps.yaml +++ b/.github/workflows/ttnn-run-sweeps.yaml @@ -233,7 +233,7 @@ on: - eltwise.binary.ne.ne_scalar_pytorch2 - eltwise.binary.hypot.hypot - eltwise.binary.xlogy.xlogy - - eltwise.binary_backward.ldexp_bw + - eltwise.binary_backward.ldexp_bw.ldexp_bw - eltwise.binary_backward.logaddexp_bw - eltwise.binary_backward.logaddexp2_bw - eltwise.binary_backward.addalpha_bw.addalpha_bw diff --git a/tests/sweep_framework/sweeps/eltwise/binary_backward/ldexp_bw.py b/tests/sweep_framework/sweeps/eltwise/binary_backward/ldexp_bw/ldexp_bw.py similarity index 93% rename from tests/sweep_framework/sweeps/eltwise/binary_backward/ldexp_bw.py rename to tests/sweep_framework/sweeps/eltwise/binary_backward/ldexp_bw/ldexp_bw.py index 55296b9305b..5a15294779c 100644 --- a/tests/sweep_framework/sweeps/eltwise/binary_backward/ldexp_bw.py +++ b/tests/sweep_framework/sweeps/eltwise/binary_backward/ldexp_bw/ldexp_bw.py @@ -14,9 +14,6 @@ from tests.ttnn.utils_for_testing import check_with_pcc, start_measuring_time, stop_measuring_time from models.utility_functions import torch_random -# Override the default timeout in seconds for hang detection. 
-TIMEOUT = 30 - random.seed(0) @@ -69,21 +66,18 @@ def run( *, device, ) -> list: - data_seed = random.randint(0, 20000000) - torch.manual_seed(data_seed) - torch_grad_tensor = gen_func_with_cast_tt(partial(torch_random, low=-10, high=10, dtype=torch.float32), grad_dtype)( input_shape ) torch_input_tensor_a = gen_func_with_cast_tt( - partial(torch_random, low=-100, high=100, dtype=torch.float32), input_a_dtype + partial(torch_random, low=-80, high=80, dtype=torch.float32), input_a_dtype )(input_shape) torch_input_tensor_a.requires_grad = True torch_input_tensor_a.retain_grad() torch_input_tensor_b = gen_func_with_cast_tt( - partial(torch_random, low=-100, high=100, dtype=torch.float32), input_b_dtype + partial(torch_random, low=-80, high=80, dtype=torch.float32), input_b_dtype )(input_shape) torch_input_tensor_b.requires_grad = True torch_input_tensor_b.retain_grad() diff --git a/ttnn/cpp/ttnn/operations/eltwise/binary_backward/binary_backward_pybind.hpp b/ttnn/cpp/ttnn/operations/eltwise/binary_backward/binary_backward_pybind.hpp index b2cb39ff665..0cdb5d08995 100644 --- a/ttnn/cpp/ttnn/operations/eltwise/binary_backward/binary_backward_pybind.hpp +++ b/ttnn/cpp/ttnn/operations/eltwise/binary_backward/binary_backward_pybind.hpp @@ -22,7 +22,7 @@ namespace binary_backward { namespace detail { template -void bind_binary_backward_ops(py::module& module, const binary_backward_operation_t& operation, const std::string_view description, const std::string_view supported_dtype = "BFLOAT16") { +void bind_binary_backward_ops(py::module& module, const binary_backward_operation_t& operation, const std::string_view description, const std::string_view supported_dtype = "BFLOAT16", const std::string_view note = "") { auto doc = fmt::format( R"doc( {2} @@ -53,6 +53,8 @@ void bind_binary_backward_ops(py::module& module, const binary_backward_operatio bfloat8_b/bfloat4_b is only supported on TILE_LAYOUT + {4} + Example: >>> grad_tensor = ttnn.from_torch(torch.tensor([[1, 2], [3, 
4]], dtype=torch.bfloat16), layout=ttnn.TILE_LAYOUT, device=device) >>> tensor1 = ttnn.from_torch(torch.tensor([[1, 2], [3, 4]], dtype=torch.bfloat16, requires_grad=True), layout=ttnn.TILE_LAYOUT, device=device) @@ -65,7 +67,8 @@ void bind_binary_backward_ops(py::module& module, const binary_backward_operatio operation.base_name(), operation.python_fully_qualified_name(), description, - supported_dtype); + supported_dtype, + note); bind_registered_operation( module, @@ -1115,7 +1118,7 @@ void py_module(py::module& module) { module, ttnn::ldexp_bw, R"doc(Performs backward operations for ldexp of :attr:`input_tensor_a` and :attr:`input_tensor_b` with given :attr:`grad_tensor`.)doc", - R"doc(BFLOAT16)doc"); + R"doc(BFLOAT16)doc", R"doc(Recommended input range: [-80, 80]. PCC accuracy may degrade if the input falls outside this range.)doc"); detail::bind_binary_backward_ops(