# Update unary doc examples set2 (#15424)
### Ticket
#14982 

### Problem description
The example tensors provided in the docs are inconsistent with the ops. Per the docs, most ops support device operation only for tensors of rank >= 2, yet the documented examples create 1D tensors.

### What's changed
- Updated and tested the documentation examples to create 2D tensors consistent with the unary ops (a sketch follows below)
- Updated the supported parameters
- Updated the sweep tests for sinh, cosh, and log1p

No. of Ops: 22
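
For illustration, a minimal before/after sketch of the documented example style this change moves toward; `ttnn.sinh` stands in for any of the 22 ops, and the single-device setup is an assumption:

```python
import torch
import ttnn

# Assumption: a single local device with id 0 is available.
device = ttnn.open_device(device_id=0)

# Old doc style: a 1D example tensor, inconsistent with ops that
# require rank >= 2 on device:
#   tensor = ttnn.from_torch(torch.tensor((1, 2), dtype=torch.bfloat16), device=device)

# New doc style: a 2D tensor in TILE_LAYOUT, matching the supported ranks (2, 3, 4).
tensor = ttnn.from_torch(
    torch.rand([2, 2], dtype=torch.bfloat16),
    layout=ttnn.TILE_LAYOUT,
    device=device,
)
output = ttnn.sinh(tensor)

ttnn.close_device(device)
```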

### Checklist
- [x] [Post commit CI passes](https://github.com/tenstorrent/tt-metal/actions/runs/12012904469)
- [x] Run Sweeps:
  - https://github.com/tenstorrent/tt-metal/actions/runs/12012919227
  - https://github.com/tenstorrent/tt-metal/actions/runs/12012924678
  - https://github.com/tenstorrent/tt-metal/actions/runs/12012930820
mcw-anasuya authored Nov 26, 2024
1 parent 78075c6 commit 4edfd3f
Showing 5 changed files with 44 additions and 31 deletions.

#### docs/source/ttnn/ttnn/api.rst (2 changes: 1 addition & 1 deletion)
```diff
@@ -93,7 +93,6 @@ Pointwise Unary
 
    ttnn.abs
    ttnn.acos
-   ttnn.logical_not_
    ttnn.acosh
    ttnn.asin
    ttnn.asinh
@@ -152,6 +151,7 @@ Pointwise Unary
    ttnn.log2
    ttnn.log_sigmoid
    ttnn.logical_not
+   ttnn.logical_not_
    ttnn.logit
    ttnn.ltz
    ttnn.mish
```
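
For context, `ttnn.logical_not_` is the in-place variant of `ttnn.logical_not` (trailing underscore), which is why the entry moves to sit after it alphabetically. A hedged usage sketch, assuming a local device:

```python
import torch
import ttnn

# Assumption: a single local device with id 0 is available.
device = ttnn.open_device(device_id=0)

tensor = ttnn.from_torch(
    torch.tensor([[0, 1], [1, 0]], dtype=torch.bfloat16),
    layout=ttnn.TILE_LAYOUT,
    device=device,
)

out = ttnn.logical_not(tensor)  # out-of-place: allocates and returns a new tensor
ttnn.logical_not_(tensor)       # in-place: overwrites `tensor` itself

ttnn.close_device(device)
```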

#### tests/sweep_framework/sweeps/eltwise/unary/cosh/cosh.py (4 changes: 2 additions & 2 deletions)
```diff
@@ -35,8 +35,8 @@
 # If invalidated, the vector will still be stored but will be skipped.
 # Returns False, None if the vector is valid, and True, str with a reason for invalidation if it is invalid.
 def invalidate_vector(test_vector) -> Tuple[bool, Optional[str]]:
-    if test_vector["input_layout"] == ttnn.ROW_MAJOR_LAYOUT or test_vector["input_dtype"] == ttnn.bfloat8_b:
-        return True, "ROW_MAJOR_LAYOUT and ttnn.bfloat8_b are not supported"
+    if test_vector["input_layout"] == ttnn.ROW_MAJOR_LAYOUT:
+        return True, "ROW_MAJOR_LAYOUT is not supported"
     return False, None
```

#### tests/sweep_framework/sweeps/eltwise/unary/log1p/log1p.py (4 changes: 2 additions & 2 deletions)
```diff
@@ -34,8 +34,8 @@
 # If invalidated, the vector will still be stored but will be skipped.
 # Returns False, None if the vector is valid, and True, str with a reason for invalidation if it is invalid.
 def invalidate_vector(test_vector) -> Tuple[bool, Optional[str]]:
-    if test_vector["input_layout"] == ttnn.ROW_MAJOR_LAYOUT or test_vector["input_dtype"] == ttnn.bfloat8_b:
-        return True, "Row Major layout and bfloat8_b are not supported"
+    if test_vector["input_layout"] == ttnn.ROW_MAJOR_LAYOUT:
+        return True, "Row Major layout is not supported"
     return False, None
```

#### tests/sweep_framework/sweeps/eltwise/unary/sinh/sinh.py (4 changes: 2 additions & 2 deletions)
```diff
@@ -44,8 +44,8 @@
 # If invalidated, the vector will still be stored but will be skipped.
 # Returns False, None if the vector is valid, and True, str with a reason for invalidation if it is invalid.
 def invalidate_vector(test_vector) -> Tuple[bool, Optional[str]]:
-    if test_vector["input_layout"] == ttnn.ROW_MAJOR_LAYOUT or test_vector["input_dtype"] == ttnn.bfloat8_b:
-        return True, "ROW_MAJOR_LAYOUT and ttnn.bfloat8_b are not supported"
+    if test_vector["input_layout"] == ttnn.ROW_MAJOR_LAYOUT:
+        return True, "ROW_MAJOR_LAYOUT is not supported"
     return False, None
```
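
The same relaxation is applied in all three sweep files above: only `ROW_MAJOR_LAYOUT` now invalidates a vector, while `bfloat8_b` inputs are swept instead of skipped. A minimal sketch of the predicate's behavior (the `test_vector` keys follow the sweep-framework usage in the diffs; running it standalone is an assumption):

```python
from typing import Optional, Tuple

import ttnn

def invalidate_vector(test_vector) -> Tuple[bool, Optional[str]]:
    # Only row-major layout is rejected now; bfloat8_b no longer skips.
    if test_vector["input_layout"] == ttnn.ROW_MAJOR_LAYOUT:
        return True, "ROW_MAJOR_LAYOUT is not supported"
    return False, None

# A bfloat8_b vector in TILE_LAYOUT is now considered valid...
assert invalidate_vector(
    {"input_layout": ttnn.TILE_LAYOUT, "input_dtype": ttnn.bfloat8_b}
) == (False, None)

# ...while a row-major vector is still skipped.
assert invalidate_vector({"input_layout": ttnn.ROW_MAJOR_LAYOUT})[0] is True
```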

#### ttnn/cpp/ttnn/operations/eltwise/unary/unary_pybind.hpp (61 changes: 37 additions & 24 deletions)
```diff
@@ -999,7 +999,16 @@ void bind_power(py::module& module, const unary_operation_t& operation, const st
 }
 
 template <typename unary_operation_t>
-void bind_unary_composite(py::module& module, const unary_operation_t& operation, const std::string& description, const std::string& range = "", const std::string& supported_dtype = "BFLOAT16", const std::string& supported_layout = "TILE", const std::string& supported_rank = "2,3,4", const std::string& info_doc = "") {
+void bind_unary_composite(
+    py::module& module,
+    const unary_operation_t& operation,
+    const std::string& description,
+    const std::string& range = "",
+    const std::string& supported_dtype = "BFLOAT16",
+    const std::string& supported_layout = "TILE",
+    const std::string& supported_rank = "2, 3, 4",
+    const std::string& note = "",
+    const std::string& example_tensor = "torch.rand([2, 2], dtype=torch.bfloat16)") {
     auto doc = fmt::format(
         R"doc(
         {2}
@@ -1008,7 +1017,7 @@ void bind_unary_composite(py::module& module, const unary_operation
             input_tensor (ttnn.Tensor): the input tensor. {3}
 
         Keyword Args:
-            memory_config (ttnn.MemoryConfig, optional): Memory configuration for the operation. Defaults to `None`.
+            memory_config (ttnn.MemoryConfig, optional): memory configuration for the operation. Defaults to `None`.
 
         Returns:
            ttnn.Tensor: the output tensor.
@@ -1029,7 +1038,7 @@ void bind_unary_composite(py::module& module, const unary_operation
         {7}
 
         Example:
-            >>> tensor = ttnn.from_torch(torch.tensor((1, 2), dtype=torch.bfloat16), device=device)
+            >>> tensor = ttnn.from_torch({8}, layout=ttnn.TILE_LAYOUT, device=device)
            >>> output = {1}(tensor)
         )doc",
         operation.base_name(),
@@ -1039,7 +1048,8 @@ void bind_unary_composite(py::module& module, const unary_operation
         supported_dtype,
         supported_layout,
         supported_rank,
-        info_doc);
+        note,
+        example_tensor);
 
     bind_registered_operation(
         module,
@@ -1926,33 +1936,36 @@ void py_module(py::module& module) {
     detail::bind_power(module, ttnn::pow, R"doc(BFLOAT16, BFLOAT8_B)doc");
 
     // unary composite imported into ttnn
-    detail::bind_unary_composite(module, ttnn::deg2rad, R"doc(Performs deg2rad function on :attr:`input_tensor`.)doc");
-    detail::bind_unary_composite(module, ttnn::rad2deg, R"doc(Performs rad2deg function on :attr:`input_tensor`.)doc");
-    detail::bind_unary_composite(module, ttnn::tanhshrink, R"doc(Performs tanhshrink function on :attr:`input_tensor`.)doc");
-    detail::bind_unary_composite(module, ttnn::acosh, R"doc(Performs acosh function on :attr:`input_tensor`.)doc", "",R"doc(BFLOAT16)doc",R"doc(TILE)doc", R"doc(2, 3, 4)doc",
+    detail::bind_unary_composite(module, ttnn::deg2rad, R"doc(Performs deg2rad function on :attr:`input_tensor`.)doc", "", R"doc(BFLOAT16, BFLOAT8_B)doc");
+    detail::bind_unary_composite(module, ttnn::rad2deg, R"doc(Performs rad2deg function on :attr:`input_tensor`.)doc", "", R"doc(BFLOAT16, BFLOAT8_B)doc");
+    detail::bind_unary_composite(module, ttnn::tanhshrink, R"doc(Performs tanhshrink function on :attr:`input_tensor`.)doc", "", R"doc(BFLOAT16, BFLOAT8_B)doc");
+    detail::bind_unary_composite(module, ttnn::acosh, R"doc(Performs acosh function on :attr:`input_tensor`.)doc", "", R"doc(BFLOAT16)doc", R"doc(TILE)doc", R"doc(2, 3, 4)doc",
         R"doc(System memory is not supported.)doc");
-    detail::bind_unary_composite(module, ttnn::asinh, R"doc(Performs asinh function on :attr:`input_tensor`.)doc", "",R"doc(BFLOAT16)doc", R"doc(TILE)doc", R"doc(2, 3, 4)doc",
+    detail::bind_unary_composite(module, ttnn::asinh, R"doc(Performs asinh function on :attr:`input_tensor`.)doc", "", R"doc(BFLOAT16)doc", R"doc(TILE)doc", R"doc(2, 3, 4)doc",
         R"doc(System memory is not supported.)doc");
-    detail::bind_unary_composite(module, ttnn::atanh, R"doc(Performs atanh function on :attr:`input_tensor`.)doc", "",R"doc(BFLOAT16)doc", R"doc(TILE)doc", R"doc(2, 3, 4)doc",
+    detail::bind_unary_composite(module, ttnn::atanh, R"doc(Performs atanh function on :attr:`input_tensor`.)doc", "", R"doc(BFLOAT16)doc", R"doc(TILE)doc", R"doc(2, 3, 4)doc",
         R"doc(System memory is not supported.)doc");
     detail::bind_unary_composite(module, ttnn::cbrt, R"doc(Performs cbrt function on :attr:`input_tensor`.)doc");
-    detail::bind_unary_composite(module, ttnn::cosh, R"doc(Performs cosh function on :attr:`input_tensor`.)doc", "[supported range -9 to 9]",R"doc(BFLOAT16)doc");
-    detail::bind_unary_composite(module, ttnn::digamma, R"doc(Performs digamma function on :attr:`input_tensor`.)doc", "[supported for value greater than 0]");
-    detail::bind_unary_composite(module, ttnn::lgamma, R"doc(Performs lgamma function on :attr:`input_tensor`.)doc", "[supported for value greater than 0]", R"doc(BFLOAT16)doc");
-    detail::bind_unary_composite(module, ttnn::log1p, R"doc(Performs log1p function on :attr:`input_tensor`.)doc", "[supported range -1 to 1]", R"doc(BFLOAT16)doc");
-    detail::bind_unary_composite(module, ttnn::mish, R"doc(Performs mish function on :attr:`input_tensor`.)doc", "[supported range -20 to inf]", R"doc(BFLOAT16, BFLOAT8_B)doc", R"doc(TILE)doc", R"doc(2, 3, 4)doc",
-        R"doc(Not supported on Grayskull.)doc");
-    detail::bind_unary_composite(module, ttnn::multigammaln, R"doc(Performs multigammaln function on :attr:`input_tensor`.)doc", "[supported range 1.6 to inf]", R"doc(BFLOAT16)doc");
-
-    detail::bind_unary_composite(module, ttnn::sinh, R"doc(Performs sinh function on :attr:`input_tensor`.)doc", "[supported range -88 to 88]", R"doc(BFLOAT16)doc");
+    detail::bind_unary_composite(module, ttnn::cosh, R"doc(Performs cosh function on :attr:`input_tensor`.)doc", "[supported range -9 to 9]", R"doc(BFLOAT16, BFLOAT8_B)doc");
+    detail::bind_unary_composite(module, ttnn::digamma, R"doc(Performs digamma function on :attr:`input_tensor`.)doc", "[supported for values greater than 0].",
+        R"doc(BFLOAT16, BFLOAT8_B)doc", R"doc(TILE)doc", R"doc(2, 3, 4)doc", "", R"doc(torch.tensor([[2, 3], [4, 5]], dtype=torch.bfloat16))doc");
+    detail::bind_unary_composite(module, ttnn::lgamma, R"doc(Performs lgamma function on :attr:`input_tensor`.)doc", "[supported for value greater than 0].", R"doc(BFLOAT16)doc");
+    detail::bind_unary_composite(module, ttnn::log1p, R"doc(Performs log1p function on :attr:`input_tensor`.)doc", "[supported range -1 to 1].", R"doc(BFLOAT16, BFLOAT8_B)doc");
+    detail::bind_unary_composite(module, ttnn::mish, R"doc(Performs mish function on :attr:`input_tensor`.)doc", "[supported range -20 to inf].", R"doc(BFLOAT16, BFLOAT8_B)doc",
+        R"doc(TILE)doc", R"doc(2, 3, 4)doc", R"doc(Not supported on Grayskull.)doc");
+    detail::bind_unary_composite(module, ttnn::multigammaln, R"doc(Performs multigammaln function on :attr:`input_tensor`.)doc", "[supported range 1.6 to inf].", R"doc(BFLOAT16)doc",
+        R"doc(TILE)doc", R"doc(2, 3, 4)doc", "", R"doc(torch.tensor([[2, 3], [4, 5]], dtype=torch.bfloat16))doc");
+    detail::bind_unary_composite(module, ttnn::sinh, R"doc(Performs sinh function on :attr:`input_tensor`.)doc", "[supported range -9 to 9].", R"doc(BFLOAT16, BFLOAT8_B)doc");
     detail::bind_unary_composite(module, ttnn::softsign, R"doc(Performs softsign function on :attr:`input_tensor`.)doc");
-    detail::bind_unary_composite(module, ttnn::swish, R"doc(Performs swish function on :attr:`input_tensor`.)doc");
+    detail::bind_unary_composite(module, ttnn::swish, R"doc(Performs swish function on :attr:`input_tensor`.)doc", "", R"doc(BFLOAT16, BFLOAT8_B)doc");
     detail::bind_unary_composite(module, ttnn::var_hw, R"doc(Performs var_hw function on :attr:`input_tensor`.)doc");
     detail::bind_unary_composite(module, ttnn::std_hw, R"doc(Performs std_hw function on :attr:`input_tensor`.)doc");
-    detail::bind_unary_composite(module, ttnn::normalize_hw, R"doc(Performs normalize_hw function on :attr:`input_tensor`.)doc", "",R"doc(BFLOAT16)doc", R"doc(ROW_MAJOR, TILE)doc", R"doc(4)doc");
-    detail::bind_unary_composite(module, ttnn::logical_not_, R"doc(Performs logical_not inplace function on :attr:`input_tensor`.)doc");
-    detail::bind_unary_composite(module, ttnn::normalize_global, R"doc(Performs normalize_global function on :attr:`input_tensor`.)doc", "",R"doc(BFLOAT16)doc", R"doc(ROW_MAJOR, TILE)doc", R"doc(4)doc");
-    detail::bind_unary_composite(module, ttnn::frac, R"doc(Performs frac function on :attr:`input_tensor`.)doc");
+    detail::bind_unary_composite(module, ttnn::normalize_hw, R"doc(Performs normalize_hw function on :attr:`input_tensor`.)doc", "", R"doc(BFLOAT16)doc", R"doc(ROW_MAJOR, TILE)doc",
+        R"doc(4)doc", "", R"doc(torch.rand([1, 1, 32, 32], dtype=torch.bfloat16))doc");
+    detail::bind_unary_composite(module, ttnn::logical_not_, R"doc(Performs logical_not inplace function on :attr:`input_tensor`.)doc", "", R"doc(BFLOAT16, BFLOAT8_B)doc");
+    detail::bind_unary_composite(module, ttnn::normalize_global, R"doc(Performs normalize_global function on :attr:`input_tensor`.)doc", "", R"doc(BFLOAT16)doc",
+        R"doc(ROW_MAJOR, TILE)doc", R"doc(4)doc", "", R"doc(torch.rand([1, 1, 32, 32], dtype=torch.bfloat16))doc");
+    detail::bind_unary_composite(module, ttnn::frac, R"doc(Performs frac function on :attr:`input_tensor`.)doc", "", R"doc(BFLOAT16, BFLOAT8_B)doc");
    detail::bind_unary_composite_operation(module, ttnn::trunc, R"doc(Not supported for grayskull.)doc");
 
     detail::bind_unary_composite_floats_with_default(
```
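
To make the placeholder wiring explicit: the new `example_tensor` argument lands in slot `{8}` of the docstring template. A hedged Python stand-in for the `fmt::format` call above (`str.format` plays the role of `fmt::format`; the `{0}` and `{1}` values are assumptions, since those arguments are elided between the hunks):

```python
# Python stand-in for the fmt::format call in bind_unary_composite;
# str.format's positional {0}, {1}, ... mirror fmt::format's syntax.
doc_template = """{2}

Example:
    >>> tensor = ttnn.from_torch({8}, layout=ttnn.TILE_LAYOUT, device=device)
    >>> output = {1}(tensor)
"""

doc = doc_template.format(
    "unary",                                               # {0}: operation.base_name() (assumed value)
    "ttnn.digamma",                                        # {1}: fully qualified op name (assumed)
    "Performs digamma function on :attr:`input_tensor`.",  # {2}: description
    "[supported for values greater than 0].",              # {3}: range
    "BFLOAT16, BFLOAT8_B",                                 # {4}: supported_dtype
    "TILE",                                                # {5}: supported_layout
    "2, 3, 4",                                             # {6}: supported_rank
    "",                                                    # {7}: note
    "torch.tensor([[2, 3], [4, 5]], dtype=torch.bfloat16)",  # {8}: example_tensor
)
print(doc)
```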
