Skip to content

Commit

Permalink
#8681: Add Floor, Trunc blocker ops (#9098)
Browse files Browse the repository at this point in the history
* #8681: Add div_trunc op

* #8681: Add frac op

* #8681: Add rdiv_trunc op

* #8681: Add rfloor_div op

* #8681: Add ceil op

* #8681: Replace ops in unary comparison bw with ttnn ops

* #8681: Replace ops in trunc_bw with ttnn ops

* #8681: Replace ops in fill_zero_bw with ttnn ops

* #8681: Replace ops in celu_bw with ttnn ops

* #8681: Replace ops in hardshrink_bw with ttnn ops

* #8681: Replace ops in leaky_relu_bw with ttnn ops

* #8681: Replace ops in hardsigmoid_bw with ttnn ops

* #8681: Replace ops in elu_bw with ttnn ops

* #8681: Replace ops in atanh_bw with ttnn ops

---------

Co-authored-by: mouliraj-mcw <[email protected]>
Co-authored-by: mcw-anasuya <[email protected]>
  • Loading branch information
3 people authored Jul 12, 2024
1 parent 69fcf8b commit 9756f18
Show file tree
Hide file tree
Showing 31 changed files with 754 additions and 38 deletions.
10 changes: 10 additions & 0 deletions docs/source/ttnn/ttnn/dependencies/tt_lib.rst
Original file line number Diff line number Diff line change
Expand Up @@ -274,6 +274,10 @@ Tensor elementwise operations

.. autofunction:: tt_lib.tensor.div

.. autofunction:: tt_lib.tensor.div_trunc

.. autofunction:: tt_lib.tensor.unary_rdiv_trunc

.. autofunction:: tt_lib.tensor.div_no_nan

.. autofunction:: tt_lib.tensor.add_unary
Expand Down Expand Up @@ -454,12 +458,18 @@ Tensor elementwise operations

.. autofunction:: tt_lib.tensor.floor

.. autofunction:: tt_lib.tensor.ceil

.. autofunction:: tt_lib.tensor.trunc

.. autofunction:: tt_lib.tensor.frac

.. autofunction:: tt_lib.tensor.round

.. autofunction:: tt_lib.tensor.floor_div

.. autofunction:: tt_lib.tensor.rfloor_div

Tensor relational operations
============================
.. autofunction:: tt_lib.tensor.gtz
Expand Down
24 changes: 24 additions & 0 deletions tests/tt_eager/python_api_testing/sweep_tests/op_map.py
Original file line number Diff line number Diff line change
Expand Up @@ -268,6 +268,18 @@
"tt_op": tt_lib_ops.eltwise_div,
"pytorch_op": pytorch_ops.div,
},
"eltwise-div_trunc": {
"tt_op": tt_lib_ops.eltwise_div_trunc,
"pytorch_op": pytorch_ops.div_trunc,
},
"eltwise-unary_div_trunc": {
"tt_op": tt_lib_ops.eltwise_unary_div_trunc,
"pytorch_op": pytorch_ops.unary_div_trunc,
},
"eltwise-unary_rdiv_trunc": {
"tt_op": tt_lib_ops.eltwise_unary_rdiv_trunc,
"pytorch_op": pytorch_ops.unary_rdiv_trunc,
},
"eltwise-div_no_nan": {
"tt_op": tt_lib_ops.eltwise_div_no_nan,
"pytorch_op": pytorch_ops.div_no_nan,
Expand Down Expand Up @@ -620,10 +632,18 @@
"tt_op": tt_lib_ops.eltwise_floor,
"pytorch_op": pytorch_ops.floor,
},
"eltwise-ceil": {
"tt_op": tt_lib_ops.eltwise_ceil,
"pytorch_op": pytorch_ops.ceil,
},
"eltwise-trunc": {
"tt_op": tt_lib_ops.eltwise_trunc,
"pytorch_op": pytorch_ops.trunc,
},
"eltwise-frac": {
"tt_op": tt_lib_ops.eltwise_frac,
"pytorch_op": pytorch_ops.frac,
},
"eltwise-floor_div": {
"tt_op": tt_lib_ops.eltwise_floor_div,
"pytorch_op": pytorch_ops.floor_div,
Expand All @@ -632,6 +652,10 @@
"tt_op": tt_lib_ops.eltwise_unary_floor_div,
"pytorch_op": pytorch_ops.unary_floor_div,
},
"eltwise-rfloor_div": {
"tt_op": tt_lib_ops.eltwise_rfloor_div,
"pytorch_op": pytorch_ops.rfloor_div,
},
"eltwise-round": {
"tt_op": tt_lib_ops.eltwise_round,
"pytorch_op": pytorch_ops.round,
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,63 @@
# SPDX-FileCopyrightText: © 2023-24 Tenstorrent Inc.

# SPDX-License-Identifier: Apache-2.0

import pytest
import torch
import random
from functools import partial
import tt_lib as ttl


from tests.tt_eager.python_api_testing.sweep_tests import (
comparison_funcs,
generation_funcs,
)
from tests.tt_eager.python_api_testing.sweep_tests.run_pytorch_ci_tests import (
run_single_pytorch_test,
)
from models.utility_functions import skip_for_grayskull

mem_configs = [
ttl.tensor.MemoryConfig(ttl.tensor.TensorMemoryLayout.INTERLEAVED, ttl.tensor.BufferType.DRAM),
ttl.tensor.MemoryConfig(ttl.tensor.TensorMemoryLayout.INTERLEAVED, ttl.tensor.BufferType.L1),
]


@pytest.mark.parametrize(
    "input_shapes",
    [
        [[1, 1, 32, 32], [1, 1, 32, 32]],
        [[1, 1, 320, 384], [1, 1, 320, 384]],
        [[1, 3, 320, 384], [1, 3, 320, 384]],
    ],
)
@pytest.mark.parametrize(
    "dst_mem_config",
    mem_configs,
)
@skip_for_grayskull("#ToDo: GS implementation needs to be done for floor")
class TestDivTrunc:
    """Sweep test for the eltwise div_trunc (binary) op, compared by PCC."""

    def test_run_div_trunc(
        self,
        input_shapes,
        dst_mem_config,
        device,
    ):
        # One bfloat16 random generator per input tensor, both over the same wide range.
        datagen_func = [
            generation_funcs.gen_func_with_cast(partial(generation_funcs.gen_rand, low=-1e6, high=1e6), torch.bfloat16)
            for _ in range(2)
        ]
        test_args = generation_funcs.gen_default_dtype_layout_device(input_shapes)[0]
        test_args["output_mem_config"] = dst_mem_config
        comparison_func = comparison_funcs.comp_pcc

        run_single_pytorch_test(
            "eltwise-div_trunc",
            input_shapes,
            datagen_func,
            comparison_func,
            device,
            test_args,
        )
Original file line number Diff line number Diff line change
Expand Up @@ -585,7 +585,7 @@ def test_run_eltwise_sign_ops(
test_args,
)

@pytest.mark.parametrize("round_off_method", ["floor", "trunc"])
@pytest.mark.parametrize("round_off_method", ["floor", "ceil", "trunc"])
@skip_for_grayskull("#ToDo: GS implementation needs to be done for Floor")
def test_run_eltwise_round_off_ops(
self,
Expand All @@ -597,9 +597,7 @@ def test_run_eltwise_round_off_ops(
output_mem_config,
):
datagen_func = [
generation_funcs.gen_func_with_cast(
partial(generation_funcs.gen_rand, low=-1000, high=1000), torch.bfloat16
)
generation_funcs.gen_func_with_cast(partial(generation_funcs.gen_rand, low=-1e6, high=1e6), torch.bfloat16)
]
test_args = generation_funcs.gen_default_dtype_layout_device(input_shapes)[0]
test_args.update(
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
# SPDX-FileCopyrightText: © 2023-24 Tenstorrent Inc.

# SPDX-License-Identifier: Apache-2.0

import pytest
import torch
import random
from functools import partial
import tt_lib as ttl
from models.utility_functions import skip_for_grayskull

from tests.tt_eager.python_api_testing.sweep_tests import (
comparison_funcs,
generation_funcs,
)
from tests.tt_eager.python_api_testing.sweep_tests.run_pytorch_ci_tests import (
run_single_pytorch_test,
)

mem_configs = [
ttl.tensor.MemoryConfig(ttl.tensor.TensorMemoryLayout.INTERLEAVED, ttl.tensor.BufferType.DRAM),
ttl.tensor.MemoryConfig(ttl.tensor.TensorMemoryLayout.INTERLEAVED, ttl.tensor.BufferType.L1),
]


@pytest.mark.parametrize(
    "input_shapes",
    [
        [[1, 1, 32, 32]],
        [[1, 1, 320, 384]],
        [[1, 3, 320, 384]],
    ],
)
@pytest.mark.parametrize(
    "dst_mem_config",
    mem_configs,
)
@skip_for_grayskull("#ToDo: GS implementation needs to be done")
class TestFrac:
    """Sweep test for the eltwise frac (unary) op, compared by PCC."""

    def test_run_frac(
        self,
        input_shapes,
        dst_mem_config,
        device,
    ):
        # Single bfloat16 random input over a wide range.
        gen_funcs = [
            generation_funcs.gen_func_with_cast(partial(generation_funcs.gen_rand, low=-1e6, high=1e6), torch.bfloat16)
        ]
        args = generation_funcs.gen_default_dtype_layout_device(input_shapes)[0]
        args["output_mem_config"] = dst_mem_config
        compare = comparison_funcs.comp_pcc

        run_single_pytorch_test(
            "eltwise-frac",
            input_shapes,
            gen_funcs,
            compare,
            device,
            args,
        )
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
# SPDX-FileCopyrightText: © 2023-24 Tenstorrent Inc.

# SPDX-License-Identifier: Apache-2.0
import pytest
import torch
import random
import numpy as np
from functools import partial
import tt_lib as ttl
from tests.tt_eager.python_api_testing.sweep_tests import (
comparison_funcs,
generation_funcs,
)
from tests.tt_eager.python_api_testing.sweep_tests.run_pytorch_ci_tests import (
run_single_pytorch_test,
)
from models.utility_functions import skip_for_grayskull

mem_configs = [
ttl.tensor.MemoryConfig(ttl.tensor.TensorMemoryLayout.INTERLEAVED, ttl.tensor.BufferType.DRAM),
ttl.tensor.MemoryConfig(ttl.tensor.TensorMemoryLayout.INTERLEAVED, ttl.tensor.BufferType.L1),
]


@pytest.mark.parametrize(
    "input_shapes",
    [
        [[1, 1, 32, 32], [1, 1, 32, 32]],
        [[1, 1, 320, 384], [1, 1, 320, 384]],
        [[1, 3, 320, 384], [1, 3, 320, 384]],
    ],
)
@pytest.mark.parametrize(
    "dst_mem_config",
    mem_configs,
)
@skip_for_grayskull("#ToDo: GS implementation needs to be done for floor")
class TestRfloor_div:
    """Sweep test for the eltwise rfloor_div op with a random scalar "value" arg, compared by PCC."""

    def test_run_rfloor_div(
        self,
        input_shapes,
        dst_mem_config,
        device,
    ):
        # Single bfloat16 random tensor input over a wide range.
        datagen_func = [
            generation_funcs.gen_func_with_cast(partial(generation_funcs.gen_rand, low=-1e6, high=1e6), torch.bfloat16)
        ]
        test_args = generation_funcs.gen_default_dtype_layout_device(input_shapes)[0]
        # Fix: the original used an accidental dict comprehension,
        # {"value": random.uniform(-100, 100) for _ in range(5)}, which builds a
        # one-key dict and silently keeps only the last of 5 random draws.
        # A single draw is what was intended.
        test_args.update({"value": random.uniform(-100, 100)})
        test_args.update({"output_mem_config": dst_mem_config})
        comparison_func = comparison_funcs.comp_pcc

        # NOTE(review): input_shapes lists two shapes per case but only one
        # datagen func is supplied — confirm run_single_pytorch_test tolerates
        # the extra shape for this scalar-op test.
        run_single_pytorch_test(
            "eltwise-rfloor_div",
            input_shapes,
            datagen_func,
            comparison_func,
            device,
            test_args,
        )
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
# SPDX-FileCopyrightText: © 2023-24 Tenstorrent Inc.

# SPDX-License-Identifier: Apache-2.0
import pytest
import torch
import random
import numpy as np
from functools import partial
import tt_lib as ttl
from tests.tt_eager.python_api_testing.sweep_tests import (
comparison_funcs,
generation_funcs,
)
from tests.tt_eager.python_api_testing.sweep_tests.run_pytorch_ci_tests import (
run_single_pytorch_test,
)
from models.utility_functions import skip_for_grayskull

mem_configs = [
ttl.tensor.MemoryConfig(ttl.tensor.TensorMemoryLayout.INTERLEAVED, ttl.tensor.BufferType.DRAM),
ttl.tensor.MemoryConfig(ttl.tensor.TensorMemoryLayout.INTERLEAVED, ttl.tensor.BufferType.L1),
]


@pytest.mark.parametrize(
    "input_shapes",
    [
        [[1, 1, 32, 32], [1, 1, 32, 32]],
        [[1, 1, 320, 384], [1, 1, 320, 384]],
        [[1, 3, 320, 384], [1, 3, 320, 384]],
    ],
)
@pytest.mark.parametrize(
    "dst_mem_config",
    mem_configs,
)
@skip_for_grayskull("#ToDo: GS implementation needs to be done for floor")
class TestUnary_Div_Trunc:
    """Sweep test for the eltwise unary_div_trunc op with a random scalar "value" arg, compared by PCC."""

    def test_run_unary_div_trunc(
        self,
        input_shapes,
        dst_mem_config,
        device,
    ):
        # Single bfloat16 random tensor input over a wide range.
        datagen_func = [
            generation_funcs.gen_func_with_cast(partial(generation_funcs.gen_rand, low=-1e6, high=1e6), torch.bfloat16)
        ]
        test_args = generation_funcs.gen_default_dtype_layout_device(input_shapes)[0]
        # Fix: the original used an accidental dict comprehension,
        # {"value": random.uniform(-100, 100) for _ in range(5)}, which builds a
        # one-key dict and silently keeps only the last of 5 random draws.
        # A single draw is what was intended.
        test_args.update({"value": random.uniform(-100, 100)})
        test_args.update({"output_mem_config": dst_mem_config})
        comparison_func = comparison_funcs.comp_pcc

        # NOTE(review): input_shapes lists two shapes per case but only one
        # datagen func is supplied — confirm run_single_pytorch_test tolerates
        # the extra shape for this scalar-op test.
        run_single_pytorch_test(
            "eltwise-unary_div_trunc",
            input_shapes,
            datagen_func,
            comparison_func,
            device,
            test_args,
        )
Loading

0 comments on commit 9756f18

Please sign in to comment.