Sharded sweeps: prelu, softmax, sinh, softplus, relu_max and relu_min (#16050)


### Ticket
[Link to GitHub Issue](#11512)
### Problem description
We add sweep tests for the following sharded ops:
- prelu
- softmax
- sinh
- relu_max
- relu_min
- softplus
### What's changed
Added new sweep tests for the six sharded ops listed above.
### Pass rates for new sweeps:

- `sweeps/eltwise/unary/sinh/sinh_sharded.py`: 65 fail, 317 pass (82.98%)
- `sweeps/eltwise/unary/softmax/softmax_sharded.py`: 382 fail, 0 pass (0%)
- `sweeps/eltwise/unary/relu_max/relu_max_sharded.py`: 65 fail, 317 pass (82.98%)
- `sweeps/eltwise/unary/prelu/prelu_sharded.py`: 260 fail, 1268 pass (82.98%)
- `sweeps/eltwise/unary/relu_min/relu_min_sharded.py`: 65 fail, 317 pass (82.98%)
- `sweeps/eltwise/unary/softplus/softplus_sharded.py`: 77 fail, 305 pass (79.84%)
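
The percentages above are simply pass / (pass + fail), rounded to two decimals. A quick sketch to recompute them, with the counts copied from the list above:

```python
# Recompute the pass rates quoted above (counts copied from this PR).
results = {
    "sinh_sharded": (317, 65),  # (pass, fail)
    "softmax_sharded": (0, 382),
    "relu_max_sharded": (317, 65),
    "prelu_sharded": (1268, 260),
    "relu_min_sharded": (317, 65),
    "softplus_sharded": (305, 77),
}
for name, (passed, failed) in results.items():
    rate = 100 * passed / (passed + failed)
    print(f"{name}: {rate:.2f}%")  # e.g. sinh_sharded: 82.98%
```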


### Checklist
- [X] [Post commit CI passes](https://github.com/tenstorrent/tt-metal/actions/runs/12159246607)
- [X] Sweep tests pass
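
For readers unfamiliar with the sweep framework: each sweep module below exposes a `parameters` dict of suites, an `invalidate_vector` filter, and a `run` function. A minimal conceptual sketch of how a runner might consume them (an illustration only, not the actual `tests/sweep_framework` runner, which also handles devices, timeouts, and result storage):

```python
# Hypothetical, simplified runner loop for a sweep module.
from itertools import product

def run_suite(module, device):
    results = []
    for suite_name, params in module.parameters.items():
        keys, values = zip(*params.items())
        for combo in product(*values):  # cartesian product of input lists
            vector = dict(zip(keys, combo))
            skip, reason = module.invalidate_vector(vector)
            if skip:  # invalid vectors are stored but not executed
                continue
            results.append(module.run(**vector, device=device))
    return results
```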
npetrovic-tenstorrent authored Jan 9, 2025
1 parent 81f9494 commit 3dcd13d
Showing 8 changed files with 687 additions and 1 deletion.
8 changes: 7 additions & 1 deletion .github/workflows/ttnn-run-sweeps.yaml
@@ -125,17 +125,22 @@ on:
- eltwise.unary.hardsigmoid.hardsigmoid_sharded
- eltwise.unary.hardshrink.hardshrink
- eltwise.unary.hardshrink.hardshrink_sharded
- eltwise.unary.softmax.softmax
- normalization.softmax.softmax
- normalization.softmax.softmax_sharded
- eltwise.unary.identity.identity
- eltwise.unary.identity.identity_sharded
- eltwise.unary.neg.neg
- eltwise.unary.neg.neg_sharded
- eltwise.unary.sinh.sinh
- eltwise.unary.sinh.sinh_sharded
- eltwise.unary.asinh.asinh
- eltwise.unary.cosh.cosh
- eltwise.unary.relu_min.relu_min
- eltwise.unary.relu_min.relu_min_sharded
- eltwise.unary.relu_max.relu_max
- eltwise.unary.relu_max.relu_max_sharded
- eltwise.unary.softplus.softplus
- eltwise.unary.softplus.softplus_sharded
- eltwise.unary.selu.selu
- eltwise.unary.selu.selu_sharded
- eltwise.unary.softshrink.softshrink_sharded
@@ -236,6 +241,7 @@ on:
- eltwise.unary.lez.lez
- eltwise.unary.nez.nez
- eltwise.unary.prelu.prelu
- eltwise.unary.prelu.prelu_sharded
- eltwise.unary.hardswish.hardswish_pytorch2
- eltwise.unary.hardtanh.hardtanh_pytorch2
- eltwise.unary.leaky_relu.leaky_relu
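Each workflow entry names a sweep module whose dotted path mirrors its file location under `tests/sweep_framework/sweeps/`; both forms appear verbatim in this PR, so the mapping can be illustrated directly:

```python
# The dotted sweep names in the workflow mirror file paths under
# tests/sweep_framework/sweeps/ (both forms appear in this PR).
def module_to_path(name: str) -> str:
    return "tests/sweep_framework/sweeps/" + name.replace(".", "/") + ".py"

assert module_to_path("eltwise.unary.prelu.prelu_sharded") == (
    "tests/sweep_framework/sweeps/eltwise/unary/prelu/prelu_sharded.py"
)
```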
114 changes: 114 additions & 0 deletions tests/sweep_framework/sweeps/eltwise/unary/prelu/prelu_sharded.py
@@ -0,0 +1,114 @@
# SPDX-FileCopyrightText: © 2024 Tenstorrent Inc.

# SPDX-License-Identifier: Apache-2.0

from typing import Optional, Tuple
from functools import partial

import torch
import random
import ttnn
from tests.sweep_framework.sweep_utils.utils import sanitize_shape_rm
from tests.sweep_framework.sweep_utils.sharding_utils import (
gen_sharded_spec_unary,
parse_sharding_spec,
invalidate_vector_sharding,
)
from tests.tt_eager.python_api_testing.sweep_tests.generation_funcs import gen_func_with_cast_tt

from tests.ttnn.utils_for_testing import check_with_pcc, start_measuring_time, stop_measuring_time
from models.utility_functions import torch_random

# Override the default timeout in seconds for hang detection.
TIMEOUT = 120

random.seed(0)


# Parameters provided to the test vector generator are defined here.
# They are defined as dict-type suites that contain the arguments to the run function as keys, and lists of possible inputs as values.
# Each suite has a key name (in this case "xfail") which will associate the test vectors to this specific suite of inputs.
# Developers can create their own generator functions and pass them to the parameters as inputs.
parameters = {
"xfail": {
"input_spec": gen_sharded_spec_unary(16, max_tensor_size_per_core=20 * 1024, layouts=["TILE_LAYOUT"]),
"input_a_dtype": [ttnn.bfloat16],
"weight": [-0.5, 0, 0.01, 0.5],
},
}


# Invalidate vector is called during the generation phase where each vector will be passed in.
# If invalidated, the vector will still be stored but will be skipped.
# Returns False, None if the vector is valid, and True, str with a reason for invalidation if it is invalid.
def invalidate_vector(test_vector) -> Tuple[bool, Optional[str]]:
input_layout = test_vector["input_spec"]["input_layout"]
sharding_invalidated, output_str = invalidate_vector_sharding(test_vector["input_spec"])

if input_layout == "ROW_MAJOR_LAYOUT":
return True, "Inputs to eltwise unary ops must be tilized"
if sharding_invalidated:
return sharding_invalidated, output_str
return False, None


# These are the run instructions for the test, defined by the developer.
# The run function must take the above-defined parameters as inputs.
# The runner will call this run function with each test vector, and the returned results from this function will be stored.
# If you defined a mesh_device_fixture above, the object you yielded will be passed into this function as 'device'. Otherwise, it will be the default ttnn device opened by the infra.
def run(
input_spec,
input_a_dtype,
weight,
*,
device,
) -> list:
data_seed = random.randint(0, 20000000)
torch.manual_seed(data_seed)

(
input_shape,
core_grid,
sharding_strategy,
shard_orientation,
tensor_hw_as_shard_shape,
input_layout,
shard_height_mul_of_32,
) = parse_sharding_spec(input_spec)

if input_layout == ttnn.ROW_MAJOR_LAYOUT:
input_shape = sanitize_shape_rm(input_shape)

torch_input_tensor_a = gen_func_with_cast_tt(
partial(torch_random, low=-100, high=100, dtype=torch.float32), input_a_dtype
)(input_shape)
golden_function = ttnn.get_golden_function(ttnn.prelu)

torch_output_tensor = golden_function(torch_input_tensor_a, weight)

sharded_config = ttnn.create_sharded_memory_config_(
shape=input_shape,
core_grid=core_grid,
strategy=sharding_strategy,
orientation=shard_orientation,
use_height_and_width_as_shard_shape=tensor_hw_as_shard_shape,
tile_layout=shard_height_mul_of_32,
)

input_tensor_a = ttnn.from_torch(
torch_input_tensor_a,
dtype=input_a_dtype,
layout=input_layout,
device=device,
memory_config=sharded_config,
)

start_time = start_measuring_time()
output_tensor = ttnn.prelu(input_tensor_a, weight, memory_config=sharded_config)
e2e_perf = stop_measuring_time(start_time)
output_tensor = ttnn.to_torch(output_tensor)

pcc = check_with_pcc(torch_output_tensor, output_tensor, 0.999)
return [pcc, e2e_perf]
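
As a sanity check on what the golden comparison computes: PReLU with a scalar weight is x for x ≥ 0 and weight·x otherwise. A minimal torch-only sketch, assuming `ttnn.get_golden_function(ttnn.prelu)` mirrors this standard definition:

```python
import torch

def prelu_reference(x: torch.Tensor, weight: float) -> torch.Tensor:
    # PReLU(x) = x if x >= 0 else weight * x (scalar weight broadcast).
    return torch.where(x >= 0, x, weight * x)

x = torch.randn(2, 32, 32)
assert torch.allclose(prelu_reference(x, 0.01),
                      torch.nn.functional.prelu(x, torch.tensor([0.01])))
```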
113 changes: 113 additions & 0 deletions tests/sweep_framework/sweeps/eltwise/unary/relu_max/relu_max_sharded.py
@@ -0,0 +1,113 @@
# SPDX-FileCopyrightText: © 2024 Tenstorrent Inc.

# SPDX-License-Identifier: Apache-2.0

from typing import Optional, Tuple
from functools import partial

import torch
import random
import ttnn
from tests.sweep_framework.sweep_utils.utils import sanitize_shape_rm
from tests.sweep_framework.sweep_utils.sharding_utils import (
gen_sharded_spec_unary,
parse_sharding_spec,
invalidate_vector_sharding,
)
from tests.tt_eager.python_api_testing.sweep_tests.generation_funcs import gen_func_with_cast_tt

from tests.ttnn.utils_for_testing import check_with_pcc, start_measuring_time, stop_measuring_time
from models.utility_functions import torch_random

# Override the default timeout in seconds for hang detection.
TIMEOUT = 120

random.seed(0)

# Parameters provided to the test vector generator are defined here.
# They are defined as dict-type suites that contain the arguments to the run function as keys, and lists of possible inputs as values.
# Each suite has a key name (in this case "xfail") which will associate the test vectors to this specific suite of inputs.
# Developers can create their own generator functions and pass them to the parameters as inputs.
parameters = {
"xfail": {
"input_spec": gen_sharded_spec_unary(16, max_tensor_size_per_core=20 * 1024, layouts=["TILE_LAYOUT"]),
"input_a_dtype": [ttnn.bfloat16],
},
}


# Invalidate vector is called during the generation phase where each vector will be passed in.
# If invalidated, the vector will still be stored but will be skipped.
# Returns False, None if the vector is valid, and True, str with a reason for invalidation if it is invalid.
def invalidate_vector(test_vector) -> Tuple[bool, Optional[str]]:
input_layout = test_vector["input_spec"]["input_layout"]
sharding_invalidated, output_str = invalidate_vector_sharding(test_vector["input_spec"])

if input_layout == "ROW_MAJOR_LAYOUT":
return True, "Inputs to eltwise unary ops must be tilized"
if sharding_invalidated:
return sharding_invalidated, output_str
return False, None


# These are the run instructions for the test, defined by the developer.
# The run function must take the above-defined parameters as inputs.
# The runner will call this run function with each test vector, and the returned results from this function will be stored.
# If you defined a mesh_device_fixture above, the object you yielded will be passed into this function as 'device'. Otherwise, it will be the default ttnn device opened by the infra.
def run(
input_spec,
input_a_dtype,
*,
device,
) -> list:
data_seed = random.randint(0, 20000000)
torch.manual_seed(data_seed)

(
input_shape,
core_grid,
sharding_strategy,
shard_orientation,
tensor_hw_as_shard_shape,
input_layout,
shard_height_mul_of_32,
) = parse_sharding_spec(input_spec)

if input_layout == ttnn.ROW_MAJOR_LAYOUT:
input_shape = sanitize_shape_rm(input_shape)

torch_input_tensor_a = gen_func_with_cast_tt(
partial(torch_random, low=-100, high=100, dtype=torch.float32), input_a_dtype
)(input_shape)
golden_function = ttnn.get_golden_function(ttnn.relu_max)

upper_limit = torch.tensor(1, dtype=torch.bfloat16).uniform_(0, 100).item()

torch_output_tensor = golden_function(torch_input_tensor_a, upper_limit=upper_limit)

sharded_config = ttnn.create_sharded_memory_config_(
shape=input_shape,
core_grid=core_grid,
strategy=sharding_strategy,
orientation=shard_orientation,
use_height_and_width_as_shard_shape=tensor_hw_as_shard_shape,
tile_layout=shard_height_mul_of_32,
)

input_tensor_a = ttnn.from_torch(
torch_input_tensor_a,
dtype=input_a_dtype,
layout=input_layout,
device=device,
memory_config=sharded_config,
)

start_time = start_measuring_time()
output_tensor = ttnn.relu_max(input_tensor_a, upper_limit, memory_config=sharded_config)
e2e_perf = stop_measuring_time(start_time)
output_tensor = ttnn.to_torch(output_tensor)

pcc = check_with_pcc(torch_output_tensor, output_tensor, 0.999)
return [pcc, e2e_perf]
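
For orientation, the reference the golden function is assumed to compute here: relu_max applies ReLU and then caps values at `upper_limit`. A minimal sketch under that assumption (the exact ttnn semantics should be confirmed against its docs):

```python
import torch

def relu_max_reference(x: torch.Tensor, upper_limit: float) -> torch.Tensor:
    # Assumed semantics: ReLU, then clamp from above at upper_limit.
    return torch.clamp(torch.relu(x), max=upper_limit)
```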
114 changes: 114 additions & 0 deletions tests/sweep_framework/sweeps/eltwise/unary/relu_min/relu_min_sharded.py
@@ -0,0 +1,114 @@
# SPDX-FileCopyrightText: © 2024 Tenstorrent Inc.

# SPDX-License-Identifier: Apache-2.0


from typing import Optional, Tuple
from functools import partial

import torch
import random
import ttnn
from tests.sweep_framework.sweep_utils.utils import sanitize_shape_rm
from tests.sweep_framework.sweep_utils.sharding_utils import (
gen_sharded_spec_unary,
parse_sharding_spec,
invalidate_vector_sharding,
)
from tests.tt_eager.python_api_testing.sweep_tests.generation_funcs import gen_func_with_cast_tt

from tests.ttnn.utils_for_testing import check_with_pcc, start_measuring_time, stop_measuring_time
from models.utility_functions import torch_random

# Override the default timeout in seconds for hang detection.
TIMEOUT = 120

random.seed(0)

# Parameters provided to the test vector generator are defined here.
# They are defined as dict-type suites that contain the arguments to the run function as keys, and lists of possible inputs as values.
# Each suite has a key name (in this case "xfail") which will associate the test vectors to this specific suite of inputs.
# Developers can create their own generator functions and pass them to the parameters as inputs.
parameters = {
"xfail": {
"input_spec": gen_sharded_spec_unary(16, max_tensor_size_per_core=20 * 1024, layouts=["TILE_LAYOUT"]),
"input_a_dtype": [ttnn.bfloat16],
},
}


# Invalidate vector is called during the generation phase where each vector will be passed in.
# If invalidated, the vector will still be stored but will be skipped.
# Returns False, None if the vector is valid, and True, str with a reason for invalidation if it is invalid.
def invalidate_vector(test_vector) -> Tuple[bool, Optional[str]]:
input_layout = test_vector["input_spec"]["input_layout"]
sharding_invalidated, output_str = invalidate_vector_sharding(test_vector["input_spec"])

if input_layout == "ROW_MAJOR_LAYOUT":
return True, "Inputs to eltwise unary ops must be tilized"
if sharding_invalidated:
return sharding_invalidated, output_str
return False, None


# These are the run instructions for the test, defined by the developer.
# The run function must take the above-defined parameters as inputs.
# The runner will call this run function with each test vector, and the returned results from this function will be stored.
# If you defined a mesh_device_fixture above, the object you yielded will be passed into this function as 'device'. Otherwise, it will be the default ttnn device opened by the infra.
def run(
input_spec,
input_a_dtype,
*,
device,
) -> list:
data_seed = random.randint(0, 20000000)
torch.manual_seed(data_seed)

(
input_shape,
core_grid,
sharding_strategy,
shard_orientation,
tensor_hw_as_shard_shape,
input_layout,
shard_height_mul_of_32,
) = parse_sharding_spec(input_spec)

if input_layout == ttnn.ROW_MAJOR_LAYOUT:
input_shape = sanitize_shape_rm(input_shape)

torch_input_tensor_a = gen_func_with_cast_tt(
partial(torch_random, low=-100, high=100, dtype=torch.float32), input_a_dtype
)(input_shape)
golden_function = ttnn.get_golden_function(ttnn.relu_min)

lower_limit = torch.tensor(1, dtype=torch.bfloat16).uniform_(0, 100).item()

torch_output_tensor = golden_function(torch_input_tensor_a, lower_limit=lower_limit)

sharded_config = ttnn.create_sharded_memory_config_(
shape=input_shape,
core_grid=core_grid,
strategy=sharding_strategy,
orientation=shard_orientation,
use_height_and_width_as_shard_shape=tensor_hw_as_shard_shape,
tile_layout=shard_height_mul_of_32,
)

input_tensor_a = ttnn.from_torch(
torch_input_tensor_a,
dtype=input_a_dtype,
layout=input_layout,
device=device,
memory_config=sharded_config,
)

start_time = start_measuring_time()
output_tensor = ttnn.relu_min(input_tensor_a, lower_limit, memory_config=sharded_config)
e2e_perf = stop_measuring_time(start_time)
output_tensor = ttnn.to_torch(output_tensor)

pcc = check_with_pcc(torch_output_tensor, output_tensor, 0.999)
return [pcc, e2e_perf]
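
Correspondingly for relu_min, one plausible reading of the golden reference is a clamp from below at `lower_limit`. A minimal sketch under that assumption (again, the exact ttnn semantics should be confirmed against its docs):

```python
import torch

def relu_min_reference(x: torch.Tensor, lower_limit: float) -> torch.Tensor:
    # Assumed semantics: values below lower_limit are raised to it.
    return torch.clamp(x, min=lower_limit)
```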