From 4edfd3fc84f9b6765c277486efbd411ea139da1d Mon Sep 17 00:00:00 2001
From: Anasuya G Nair
Date: Tue, 26 Nov 2024 13:12:58 +0530
Subject: [PATCH] Update unary doc examples set2 (#15424)

### Ticket
#14982

### Problem description
The example tensors provided in the docs are inconsistent with the ops: per the docs, most of these ops support device operation only for ranks >= 2, yet the example tensors in the docs are 1D.

### What's changed
- Updated and tested the documentation examples to create 2D tensors consistent with the unary ops (a runnable sketch of the new example pattern follows the diff below)
- Updated the supported parameters
- Updated sweep tests for sinh, cosh, log1p

No. of Ops: 22

### Checklist
- [x] [Post commit CI passes](https://github.com/tenstorrent/tt-metal/actions/runs/12012904469)
- [x] Run Sweeps
  https://github.com/tenstorrent/tt-metal/actions/runs/12012919227
  https://github.com/tenstorrent/tt-metal/actions/runs/12012924678
  https://github.com/tenstorrent/tt-metal/actions/runs/12012930820
---
 docs/source/ttnn/ttnn/api.rst                 |  2 +-
 .../sweeps/eltwise/unary/cosh/cosh.py         |  4 +-
 .../sweeps/eltwise/unary/log1p/log1p.py       |  4 +-
 .../sweeps/eltwise/unary/sinh/sinh.py         |  4 +-
 .../operations/eltwise/unary/unary_pybind.hpp | 61 +++++++++++--------
 5 files changed, 44 insertions(+), 31 deletions(-)

diff --git a/docs/source/ttnn/ttnn/api.rst b/docs/source/ttnn/ttnn/api.rst index 0ae727680e2..7ac786bebb7 100644 --- a/docs/source/ttnn/ttnn/api.rst +++ b/docs/source/ttnn/ttnn/api.rst @@ -93,7 +93,6 @@ Pointwise Unary ttnn.abs ttnn.acos - ttnn.logical_not_ ttnn.acosh ttnn.asin ttnn.asinh @@ -152,6 +151,7 @@ Pointwise Unary ttnn.log2 ttnn.log_sigmoid ttnn.logical_not + ttnn.logical_not_ ttnn.logit ttnn.ltz ttnn.mish
diff --git a/tests/sweep_framework/sweeps/eltwise/unary/cosh/cosh.py b/tests/sweep_framework/sweeps/eltwise/unary/cosh/cosh.py index 2c01010ede0..f78f7365c4a 100644 --- a/tests/sweep_framework/sweeps/eltwise/unary/cosh/cosh.py +++ b/tests/sweep_framework/sweeps/eltwise/unary/cosh/cosh.py @@ -35,8 +35,8 @@ # If invalidated, the vector will still be stored but will be skipped. # Returns False, None if the vector is valid, and True, str with a reason for invalidation if it is invalid. def invalidate_vector(test_vector) -> Tuple[bool, Optional[str]]: - if test_vector["input_layout"] == ttnn.ROW_MAJOR_LAYOUT or test_vector["input_dtype"] == ttnn.bfloat8_b: - return True, "ROW_MAJOR_LAYOUT and ttnn.bfloat8_b are not supported" + if test_vector["input_layout"] == ttnn.ROW_MAJOR_LAYOUT: + return True, "ROW_MAJOR_LAYOUT is not supported" return False, None
diff --git a/tests/sweep_framework/sweeps/eltwise/unary/log1p/log1p.py b/tests/sweep_framework/sweeps/eltwise/unary/log1p/log1p.py index a188e346f78..27b1cd2119e 100644 --- a/tests/sweep_framework/sweeps/eltwise/unary/log1p/log1p.py +++ b/tests/sweep_framework/sweeps/eltwise/unary/log1p/log1p.py @@ -34,8 +34,8 @@ # If invalidated, the vector will still be stored but will be skipped. # Returns False, None if the vector is valid, and True, str with a reason for invalidation if it is invalid.
def invalidate_vector(test_vector) -> Tuple[bool, Optional[str]]: - if test_vector["input_layout"] == ttnn.ROW_MAJOR_LAYOUT or test_vector["input_dtype"] == ttnn.bfloat8_b: - return True, "Row Major layout and bfloat8_b are not supported" + if test_vector["input_layout"] == ttnn.ROW_MAJOR_LAYOUT: + return True, "Row Major layout is not supported" return False, None diff --git a/tests/sweep_framework/sweeps/eltwise/unary/sinh/sinh.py b/tests/sweep_framework/sweeps/eltwise/unary/sinh/sinh.py index fa08d9ea184..7cf8936d544 100644 --- a/tests/sweep_framework/sweeps/eltwise/unary/sinh/sinh.py +++ b/tests/sweep_framework/sweeps/eltwise/unary/sinh/sinh.py @@ -44,8 +44,8 @@ # If invalidated, the vector will still be stored but will be skipped. # Returns False, None if the vector is valid, and True, str with a reason for invalidation if it is invalid. def invalidate_vector(test_vector) -> Tuple[bool, Optional[str]]: - if test_vector["input_layout"] == ttnn.ROW_MAJOR_LAYOUT or test_vector["input_dtype"] == ttnn.bfloat8_b: - return True, "ROW_MAJOR_LAYOUT and ttnn.bfloat8_b are not supported" + if test_vector["input_layout"] == ttnn.ROW_MAJOR_LAYOUT: + return True, "ROW_MAJOR_LAYOUT is not supported" return False, None diff --git a/ttnn/cpp/ttnn/operations/eltwise/unary/unary_pybind.hpp b/ttnn/cpp/ttnn/operations/eltwise/unary/unary_pybind.hpp index f22fb9008f3..cc2ab8a7fd4 100644 --- a/ttnn/cpp/ttnn/operations/eltwise/unary/unary_pybind.hpp +++ b/ttnn/cpp/ttnn/operations/eltwise/unary/unary_pybind.hpp @@ -999,7 +999,16 @@ void bind_power(py::module& module, const unary_operation_t& operation, const st } template -void bind_unary_composite(py::module& module, const unary_operation_t& operation, const std::string& description, const std::string& range = "", const std::string& supported_dtype = "BFLOAT16", const std::string& supported_layout = "TILE", const std::string& supported_rank = "2,3,4", const std::string& info_doc = "") { +void bind_unary_composite( + py::module& module, + const unary_operation_t& operation, + const std::string& description, + const std::string& range = "", + const std::string& supported_dtype = "BFLOAT16", + const std::string& supported_layout = "TILE", + const std::string& supported_rank = "2, 3, 4", + const std::string& note = "", + const std::string& example_tensor = "torch.rand([2, 2], dtype=torch.bfloat16)") { auto doc = fmt::format( R"doc( {2} @@ -1008,7 +1017,7 @@ void bind_unary_composite(py::module& module, const unary_operation_t& operation input_tensor (ttnn.Tensor): the input tensor. {3} Keyword Args: - memory_config (ttnn.MemoryConfig, optional): Memory configuration for the operation. Defaults to `None`. + memory_config (ttnn.MemoryConfig, optional): memory configuration for the operation. Defaults to `None`. Returns: ttnn.Tensor: the output tensor. 
@@ -1029,7 +1038,7 @@ void bind_unary_composite(py::module& module, const unary_operation_t& operation {7} Example: - >>> tensor = ttnn.from_torch(torch.tensor((1, 2), dtype=torch.bfloat16), device=device) + >>> tensor = ttnn.from_torch({8}, layout=ttnn.TILE_LAYOUT, device=device) >>> output = {1}(tensor) )doc", operation.base_name(), @@ -1039,7 +1048,8 @@ void bind_unary_composite(py::module& module, const unary_operation_t& operation supported_dtype, supported_layout, supported_rank, - info_doc); + note, + example_tensor); bind_registered_operation( module, @@ -1926,33 +1936,36 @@ void py_module(py::module& module) { detail::bind_power(module, ttnn::pow, R"doc(BFLOAT16, BFLOAT8_B)doc"); // unary composite imported into ttnn - detail::bind_unary_composite(module, ttnn::deg2rad, R"doc(Performs deg2rad function on :attr:`input_tensor`.)doc"); - detail::bind_unary_composite(module, ttnn::rad2deg, R"doc(Performs rad2deg function on :attr:`input_tensor`.)doc"); - detail::bind_unary_composite(module, ttnn::tanhshrink, R"doc(Performs tanhshrink function on :attr:`input_tensor`.)doc"); - detail::bind_unary_composite(module, ttnn::acosh, R"doc(Performs acosh function on :attr:`input_tensor`.)doc", "",R"doc(BFLOAT16)doc",R"doc(TILE)doc", R"doc(2, 3, 4)doc", + detail::bind_unary_composite(module, ttnn::deg2rad, R"doc(Performs deg2rad function on :attr:`input_tensor`.)doc", "", R"doc(BFLOAT16, BFLOAT8_B)doc"); + detail::bind_unary_composite(module, ttnn::rad2deg, R"doc(Performs rad2deg function on :attr:`input_tensor`.)doc", "", R"doc(BFLOAT16, BFLOAT8_B)doc"); + detail::bind_unary_composite(module, ttnn::tanhshrink, R"doc(Performs tanhshrink function on :attr:`input_tensor`.)doc", "", R"doc(BFLOAT16, BFLOAT8_B)doc"); + detail::bind_unary_composite(module, ttnn::acosh, R"doc(Performs acosh function on :attr:`input_tensor`.)doc", "", R"doc(BFLOAT16)doc", R"doc(TILE)doc", R"doc(2, 3, 4)doc", R"doc(System memory is not supported.)doc"); - detail::bind_unary_composite(module, ttnn::asinh, R"doc(Performs asinh function on :attr:`input_tensor`.)doc", "",R"doc(BFLOAT16)doc", R"doc(TILE)doc", R"doc(2, 3, 4)doc", + detail::bind_unary_composite(module, ttnn::asinh, R"doc(Performs asinh function on :attr:`input_tensor`.)doc", "", R"doc(BFLOAT16)doc", R"doc(TILE)doc", R"doc(2, 3, 4)doc", R"doc(System memory is not supported.)doc"); - detail::bind_unary_composite(module, ttnn::atanh, R"doc(Performs atanh function on :attr:`input_tensor`.)doc", "",R"doc(BFLOAT16)doc", R"doc(TILE)doc", R"doc(2, 3, 4)doc", + detail::bind_unary_composite(module, ttnn::atanh, R"doc(Performs atanh function on :attr:`input_tensor`.)doc", "", R"doc(BFLOAT16)doc", R"doc(TILE)doc", R"doc(2, 3, 4)doc", R"doc(System memory is not supported.)doc"); detail::bind_unary_composite(module, ttnn::cbrt, R"doc(Performs cbrt function on :attr:`input_tensor`.)doc"); - detail::bind_unary_composite(module, ttnn::cosh, R"doc(Performs cosh function on :attr:`input_tensor`.)doc", "[supported range -9 to 9]",R"doc(BFLOAT16)doc"); - detail::bind_unary_composite(module, ttnn::digamma, R"doc(Performs digamma function on :attr:`input_tensor`.)doc", "[supported for value greater than 0]"); - detail::bind_unary_composite(module, ttnn::lgamma, R"doc(Performs lgamma function on :attr:`input_tensor`.)doc", "[supported for value greater than 0]", R"doc(BFLOAT16)doc"); - detail::bind_unary_composite(module, ttnn::log1p, R"doc(Performs log1p function on :attr:`input_tensor`.)doc", "[supported range -1 to 1]", R"doc(BFLOAT16)doc"); - detail::bind_unary_composite(module, 
ttnn::mish, R"doc(Performs mish function on :attr:`input_tensor`.)doc", "[supported range -20 to inf]", R"doc(BFLOAT16, BFLOAT8_B)doc", R"doc(TILE)doc", R"doc(2, 3, 4)doc", - R"doc(Not supported on Grayskull.)doc"); - detail::bind_unary_composite(module, ttnn::multigammaln, R"doc(Performs multigammaln function on :attr:`input_tensor`.)doc", "[supported range 1.6 to inf]", R"doc(BFLOAT16)doc"); - - detail::bind_unary_composite(module, ttnn::sinh, R"doc(Performs sinh function on :attr:`input_tensor`.)doc", "[supported range -88 to 88]", R"doc(BFLOAT16)doc"); + detail::bind_unary_composite(module, ttnn::cosh, R"doc(Performs cosh function on :attr:`input_tensor`.)doc", "[supported range -9 to 9]", R"doc(BFLOAT16, BFLOAT8_B)doc"); + detail::bind_unary_composite(module, ttnn::digamma, R"doc(Performs digamma function on :attr:`input_tensor`.)doc", "[supported for values greater than 0].", + R"doc(BFLOAT16, BFLOAT8_B)doc", R"doc(TILE)doc", R"doc(2, 3, 4)doc", "", R"doc(torch.tensor([[2, 3], [4, 5]], dtype=torch.bfloat16))doc"); + detail::bind_unary_composite(module, ttnn::lgamma, R"doc(Performs lgamma function on :attr:`input_tensor`.)doc", "[supported for value greater than 0].", R"doc(BFLOAT16)doc"); + detail::bind_unary_composite(module, ttnn::log1p, R"doc(Performs log1p function on :attr:`input_tensor`.)doc", "[supported range -1 to 1].", R"doc(BFLOAT16, BFLOAT8_B)doc"); + detail::bind_unary_composite(module, ttnn::mish, R"doc(Performs mish function on :attr:`input_tensor`.)doc", "[supported range -20 to inf].", R"doc(BFLOAT16, BFLOAT8_B)doc", + R"doc(TILE)doc", R"doc(2, 3, 4)doc", R"doc(Not supported on Grayskull.)doc"); + detail::bind_unary_composite(module, ttnn::multigammaln, R"doc(Performs multigammaln function on :attr:`input_tensor`.)doc", "[supported range 1.6 to inf].", R"doc(BFLOAT16)doc", + R"doc(TILE)doc", R"doc(2, 3, 4)doc", "", R"doc(torch.tensor([[2, 3], [4, 5]], dtype=torch.bfloat16))doc"); + detail::bind_unary_composite(module, ttnn::sinh, R"doc(Performs sinh function on :attr:`input_tensor`.)doc", "[supported range -9 to 9].", R"doc(BFLOAT16, BFLOAT8_B)doc"); detail::bind_unary_composite(module, ttnn::softsign, R"doc(Performs softsign function on :attr:`input_tensor`.)doc"); - detail::bind_unary_composite(module, ttnn::swish, R"doc(Performs swish function on :attr:`input_tensor`.)doc"); + detail::bind_unary_composite(module, ttnn::swish, R"doc(Performs swish function on :attr:`input_tensor`.)doc", "", R"doc(BFLOAT16, BFLOAT8_B)doc"); detail::bind_unary_composite(module, ttnn::var_hw, R"doc(Performs var_hw function on :attr:`input_tensor`.)doc"); detail::bind_unary_composite(module, ttnn::std_hw, R"doc(Performs std_hw function on :attr:`input_tensor`.)doc"); - detail::bind_unary_composite(module, ttnn::normalize_hw, R"doc(Performs normalize_hw function on :attr:`input_tensor`.)doc", "",R"doc(BFLOAT16)doc", R"doc(ROW_MAJOR, TILE)doc", R"doc(4)doc"); - detail::bind_unary_composite(module, ttnn::logical_not_, R"doc(Performs logical_not inplace function on :attr:`input_tensor`.)doc"); - detail::bind_unary_composite(module, ttnn::normalize_global, R"doc(Performs normalize_global function on :attr:`input_tensor`.)doc", "",R"doc(BFLOAT16)doc", R"doc(ROW_MAJOR, TILE)doc", R"doc(4)doc"); - detail::bind_unary_composite(module, ttnn::frac, R"doc(Performs frac function on :attr:`input_tensor`.)doc"); + detail::bind_unary_composite(module, ttnn::normalize_hw, R"doc(Performs normalize_hw function on :attr:`input_tensor`.)doc", "", R"doc(BFLOAT16)doc", R"doc(ROW_MAJOR, TILE)doc", + 
R"doc(4)doc", "", R"doc(torch.rand([1, 1, 32, 32], dtype=torch.bfloat16))doc"); + detail::bind_unary_composite(module, ttnn::logical_not_, R"doc(Performs logical_not inplace function on :attr:`input_tensor`.)doc", "", R"doc(BFLOAT16, BFLOAT8_B)doc"); + detail::bind_unary_composite(module, ttnn::normalize_global, R"doc(Performs normalize_global function on :attr:`input_tensor`.)doc", "", R"doc(BFLOAT16)doc", + R"doc(ROW_MAJOR, TILE)doc", R"doc(4)doc", "", R"doc(torch.rand([1, 1, 32, 32], dtype=torch.bfloat16))doc"); + detail::bind_unary_composite(module, ttnn::frac, R"doc(Performs frac function on :attr:`input_tensor`.)doc", "", R"doc(BFLOAT16, BFLOAT8_B)doc"); detail::bind_unary_composite_operation(module, ttnn::trunc, R"doc(Not supported for grayskull.)doc"); detail::bind_unary_composite_floats_with_default(