#11512: Refactor sharded ops
npetrovic-tenstorrent committed Dec 24, 2024
1 parent 3983193 commit 05becaf
Showing 6 changed files with 252 additions and 30 deletions.
1 change: 1 addition & 0 deletions .github/workflows/ttnn-run-sweeps.yaml
@@ -29,6 +29,7 @@ on:
- eltwise.unary.hardsigmoid.hardsigmoid_pytorch2
- eltwise.unary.leaky_relu.leaky_relu_pytorch2
- eltwise.unary.abs.abs
- eltwise.unary.abs.abs_sharded
- eltwise.unary.abs.abs_forge
- eltwise.unary.cos.cos
- eltwise.unary.cos.cos_pytorch2
111 changes: 111 additions & 0 deletions tests/sweep_framework/sweeps/eltwise/unary/abs/abs_sharded.py
@@ -0,0 +1,111 @@
# SPDX-FileCopyrightText: © 2024 Tenstorrent Inc.

# SPDX-License-Identifier: Apache-2.0

from typing import Optional, Tuple
from functools import partial

import json
import torch
import random
import ttnn
import math
from tests.sweep_framework.sweep_utils.utils import gen_shapes, sanitize_shape_rm
from tests.sweep_framework.sweep_utils.sharding_utils import (
    gen_sharded_spec_unary,
    parse_sharding_spec,
    invalidate_vector_sharding,
)
from tests.tt_eager.python_api_testing.sweep_tests.generation_funcs import gen_func_with_cast_tt

from tests.ttnn.utils_for_testing import check_with_pcc, start_measuring_time, stop_measuring_time
from models.utility_functions import torch_random

# Override the default timeout in seconds for hang detection.
TIMEOUT = 120

random.seed(0)


# Parameters provided to the test vector generator are defined here.
# They are defined as dict-type suites that contain the arguments to the run function as keys, and lists of possible inputs as values.
# Each suite has a key name (in this case "xfail") which will associate the test vectors to this specific suite of inputs.
# Developers can create their own generator functions and pass them to the parameters as inputs.
parameters = {
"xfail": {
"input_spec": gen_sharded_spec_unary(16, layouts=["TILE_LAYOUT"]),
"input_a_dtype": [ttnn.bfloat16],
},
}


# Invalidate vector is called during the generation phase where each vector will be passed in.
# If invalidated, the vector will still be stored but will be skipped.
# Returns False, None if the vector is valid, and True, str with a reason for invalidation if it is invalid.
def invalidate_vector(test_vector) -> Tuple[bool, Optional[str]]:
    input_layout = test_vector["input_spec"]["input_layout"]
    sharding_invalidated, output_str = invalidate_vector_sharding(test_vector["input_spec"])

    if input_layout == "ROW_MAJOR_LAYOUT":
        return True, "Inputs to eltwise unary ops must be tilized"
    if sharding_invalidated:
        return sharding_invalidated, output_str
    return False, None


# This is the run instructions for the test, defined by the developer.
# The run function must take the above-defined parameters as inputs.
# The runner will call this run function with each test vector, and the returned results from this function will be stored.
# If you defined a mesh_device_fixture above, the object you yielded will be passed into this function as 'device'. Otherwise, it will be the default ttnn device opened by the infra.
def run(
    input_spec,
    input_a_dtype,
    *,
    device,
) -> list:
    data_seed = random.randint(0, 20000000)
    torch.manual_seed(data_seed)

    (
        input_shape,
        core_grid,
        sharding_strategy,
        shard_orientation,
        tensor_hw_as_shard_shape,
        input_layout,
        shard_height_mul_of_32,
    ) = parse_sharding_spec(input_spec)

    if input_layout == ttnn.ROW_MAJOR_LAYOUT:
        input_shape = sanitize_shape_rm(input_shape)

    torch_input_tensor_a = gen_func_with_cast_tt(
        partial(torch_random, low=-100, high=100, dtype=torch.float32), input_a_dtype
    )(input_shape)
    golden_function = ttnn.get_golden_function(ttnn.abs)
    torch_output_tensor = golden_function(torch_input_tensor_a)

    sharded_config = ttnn.create_sharded_memory_config_(
        shape=input_shape,
        core_grid=core_grid,
        strategy=sharding_strategy,
        orientation=shard_orientation,
        use_height_and_width_as_shard_shape=tensor_hw_as_shard_shape,
        tile_layout=shard_height_mul_of_32,
    )

    input_tensor_a = ttnn.from_torch(
        torch_input_tensor_a,
        dtype=input_a_dtype,
        layout=input_layout,
        device=device,
        memory_config=sharded_config,
    )

    start_time = start_measuring_time()
    output_tensor = ttnn.abs(input_tensor_a, memory_config=sharded_config)
    e2e_perf = stop_measuring_time(start_time)
    output_tensor = ttnn.to_torch(output_tensor)

    pcc = check_with_pcc(torch_output_tensor, output_tensor, 0.999)
    return [pcc, e2e_perf]
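
The new sweep delegates all sharding-specific handling to helpers imported from tests/sweep_framework/sweep_utils/sharding_utils.py (gen_sharded_spec_unary, parse_sharding_spec, invalidate_vector_sharding), whose implementations are not part of this diff. The sketch below only illustrates the interface the call sites above assume; the field names, return shapes, and validity rule are assumptions, not the actual helper code.

# Illustrative sketch only: hypothetical shapes for the sharding_utils helpers,
# inferred from how abs_sharded.py calls them above.
from typing import Optional, Tuple


def parse_sharding_spec(input_spec: dict) -> tuple:
    # The sweep unpacks exactly seven values in this order, so a dict-backed
    # spec could be flattened like this (field names other than "input_layout"
    # are assumed).
    return (
        input_spec["input_shape"],
        input_spec["core_grid"],
        input_spec["sharding_strategy"],
        input_spec["shard_orientation"],
        input_spec["tensor_hw_as_shard_shape"],
        input_spec["input_layout"],
        input_spec["shard_height_mul_of_32"],
    )


def invalidate_vector_sharding(input_spec: dict) -> Tuple[bool, Optional[str]]:
    # Shared validity check factored out of the per-op sweeps; the concrete
    # rule below is a placeholder, not the real constraint set.
    if input_spec["tensor_hw_as_shard_shape"] and len(input_spec["input_shape"]) < 2:
        return True, "Height/width shard shape requires at least a 2D tensor"
    return False, None

With a shared helper like this in place, the per-op sweeps (abs, neg, selu, and the other files below) drop their local pre_sharded_height/pre_sharded_width bookkeeping and simply forward the spec to invalidate_vector_sharding, which is what the repeated invalidate_vector diffs in the rest of this commit show.
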
@@ -11,7 +11,11 @@
import ttnn
import math
from tests.sweep_framework.sweep_utils.utils import gen_shapes, sanitize_shape_rm
from tests.sweep_framework.sweep_utils.sharding_utils import gen_sharded_spec_unary, parse_sharding_spec
from tests.sweep_framework.sweep_utils.sharding_utils import (
gen_sharded_spec_unary,
parse_sharding_spec,
invalidate_vector_sharding,
)
from tests.tt_eager.python_api_testing.sweep_tests.generation_funcs import gen_func_with_cast_tt

from tests.ttnn.utils_for_testing import check_with_pcc, start_measuring_time, stop_measuring_time
@@ -29,7 +33,7 @@
# Developers can create their own generator functions and pass them to the parameters as inputs.
parameters = {
"xfail": {
"input_spec": gen_sharded_spec_unary(16, max_tensor_size_per_core=20 * 1024, layouts=["TILE_LAYOUT"]),
"input_spec": gen_sharded_spec_unary(16, layouts=["TILE_LAYOUT"]),
"input_a_dtype": [ttnn.bfloat16],
},
}
@@ -39,16 +43,13 @@
# If invalidated, the vector will still be stored but will be skipped.
# Returns False, None if the vector is valid, and True, str with a reason for invalidation if it is invalid.
def invalidate_vector(test_vector) -> Tuple[bool, Optional[str]]:
input_shape, X, Y, sharding_strategy, _, _, input_layout = test_vector["input_spec"].values()
pre_sharded_height = math.prod(input_shape[:-1])
pre_sharded_width = input_shape[-1]
input_layout = test_vector["input_spec"]["input_layout"]
sharding_invalidated, output_str = invalidate_vector_sharding(test_vector["input_spec"])

if input_layout == "ROW_MAJOR_LAYOUT":
return True, "Input to eltwise binary must be tilized"

if input_layout == "ROW_MAJOR_LAYOUT" and test_vector["input_a_dtype"] == ttnn.bfloat8_b:
return True, "bfloat8_b is only supported on tiled layout"

return True, "Inputs to eltwise binary must be tilized"
if sharding_invalidated:
return sharding_invalidated, output_str
return False, None


@@ -72,6 +73,7 @@ def run(
shard_orientation,
tensor_hw_as_shard_shape,
input_layout,
shard_height_mul_of_32,
) = parse_sharding_spec(input_spec)

if input_layout == ttnn.ROW_MAJOR_LAYOUT:
@@ -89,6 +91,7 @@ def run(
strategy=sharding_strategy,
orientation=shard_orientation,
use_height_and_width_as_shard_shape=tensor_hw_as_shard_shape,
tile_layout=shard_height_mul_of_32,
)

input_tensor_a = ttnn.from_torch(
23 changes: 13 additions & 10 deletions tests/sweep_framework/sweeps/eltwise/unary/neg/neg_sharded.py
@@ -11,7 +11,11 @@
import ttnn
import math
from tests.sweep_framework.sweep_utils.utils import gen_shapes, sanitize_shape_rm
from tests.sweep_framework.sweep_utils.sharding_utils import gen_sharded_spec_unary, parse_sharding_spec
from tests.sweep_framework.sweep_utils.sharding_utils import (
gen_sharded_spec_unary,
parse_sharding_spec,
invalidate_vector_sharding,
)
from tests.tt_eager.python_api_testing.sweep_tests.generation_funcs import gen_func_with_cast_tt

from tests.ttnn.utils_for_testing import check_with_pcc, start_measuring_time, stop_measuring_time
@@ -29,7 +33,7 @@
# Developers can create their own generator functions and pass them to the parameters as inputs.
parameters = {
"xfail": {
"input_spec": gen_sharded_spec_unary(16, max_tensor_size_per_core=20 * 1024, layouts=["TILE_LAYOUT"]),
"input_spec": gen_sharded_spec_unary(16, layouts=["TILE_LAYOUT"]),
"input_a_dtype": [ttnn.bfloat16],
},
}
@@ -39,16 +43,13 @@
# If invalidated, the vector will still be stored but will be skipped.
# Returns False, None if the vector is valid, and True, str with a reason for invalidation if it is invalid.
def invalidate_vector(test_vector) -> Tuple[bool, Optional[str]]:
input_shape, X, Y, sharding_strategy, _, _, input_layout = test_vector["input_spec"].values()
pre_sharded_height = math.prod(input_shape[:-1])
pre_sharded_width = input_shape[-1]
input_layout = test_vector["input_spec"]["input_layout"]
sharding_invalidated, output_str = invalidate_vector_sharding(test_vector["input_spec"])

if input_layout == "ROW_MAJOR_LAYOUT":
return True, "Input to eltwise binary must be tilized"

if input_layout == "ROW_MAJOR_LAYOUT" and test_vector["input_a_dtype"] == ttnn.bfloat8_b:
return True, "bfloat8_b is only supported on tiled layout"

return True, "Inputs to eltwise binary must be tilized"
if sharding_invalidated:
return sharding_invalidated, output_str
return False, None


@@ -72,6 +73,7 @@ def run(
shard_orientation,
tensor_hw_as_shard_shape,
input_layout,
shard_height_mul_of_32,
) = parse_sharding_spec(input_spec)

if input_layout == ttnn.ROW_MAJOR_LAYOUT:
@@ -89,6 +91,7 @@
strategy=sharding_strategy,
orientation=shard_orientation,
use_height_and_width_as_shard_shape=tensor_hw_as_shard_shape,
tile_layout=shard_height_mul_of_32,
)

input_tensor_a = ttnn.from_torch(
23 changes: 13 additions & 10 deletions tests/sweep_framework/sweeps/eltwise/unary/selu/selu_sharded.py
@@ -11,7 +11,11 @@
import ttnn
import math
from tests.sweep_framework.sweep_utils.utils import gen_shapes, sanitize_shape_rm
from tests.sweep_framework.sweep_utils.sharding_utils import gen_sharded_spec_unary, parse_sharding_spec
from tests.sweep_framework.sweep_utils.sharding_utils import (
gen_sharded_spec_unary,
parse_sharding_spec,
invalidate_vector_sharding,
)
from tests.tt_eager.python_api_testing.sweep_tests.generation_funcs import gen_func_with_cast_tt

from tests.ttnn.utils_for_testing import check_with_pcc, start_measuring_time, stop_measuring_time
@@ -29,7 +33,7 @@
# Developers can create their own generator functions and pass them to the parameters as inputs.
parameters = {
"xfail": {
"input_spec": gen_sharded_spec_unary(16, max_tensor_size_per_core=20 * 1024, layouts=["TILE_LAYOUT"]),
"input_spec": gen_sharded_spec_unary(16, layouts=["TILE_LAYOUT"]),
"input_a_dtype": [ttnn.bfloat16],
},
}
@@ -39,16 +43,13 @@
# If invalidated, the vector will still be stored but will be skipped.
# Returns False, None if the vector is valid, and True, str with a reason for invalidation if it is invalid.
def invalidate_vector(test_vector) -> Tuple[bool, Optional[str]]:
input_shape, X, Y, sharding_strategy, _, _, input_layout = test_vector["input_spec"].values()
pre_sharded_height = math.prod(input_shape[:-1])
pre_sharded_width = input_shape[-1]
input_layout = test_vector["input_spec"]["input_layout"]
sharding_invalidated, output_str = invalidate_vector_sharding(test_vector["input_spec"])

if input_layout == "ROW_MAJOR_LAYOUT":
return True, "Input to eltwise binary must be tilized"

if input_layout == "ROW_MAJOR_LAYOUT" and test_vector["input_a_dtype"] == ttnn.bfloat8_b:
return True, "bfloat8_b is only supported on tiled layout"

return True, "Inputs to eltwise binary must be tilized"
if sharding_invalidated:
return sharding_invalidated, output_str
return False, None


@@ -72,6 +73,7 @@ def run(
shard_orientation,
tensor_hw_as_shard_shape,
input_layout,
shard_height_mul_of_32,
) = parse_sharding_spec(input_spec)

if input_layout == ttnn.ROW_MAJOR_LAYOUT:
@@ -89,6 +91,7 @@
strategy=sharding_strategy,
orientation=shard_orientation,
use_height_and_width_as_shard_shape=tensor_hw_as_shard_shape,
tile_layout=shard_height_mul_of_32,
)

input_tensor_a = ttnn.from_torch(
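
For orientation, the sweep framework's runner (not part of this commit) expands each parameters suite into test vectors, filters them through invalidate_vector, and calls run on a device. The loop below is a heavily simplified, hypothetical illustration of that flow using only the module-level names defined in the sweeps above; the real runner in tests/sweep_framework has a different interface and stores results rather than returning them.

# Hypothetical, simplified driver loop -- for illustration only.
import itertools


def expand_suite(suite):
    # Cartesian product over the per-argument value lists of one suite.
    keys = list(suite.keys())
    return [dict(zip(keys, combo)) for combo in itertools.product(*suite.values())]


def drive(sweep_module, device):
    results = []
    for suite_name, suite in sweep_module.parameters.items():
        for vector in expand_suite(suite):
            skip, reason = sweep_module.invalidate_vector(vector)
            if skip:
                continue  # invalid vectors are recorded but never executed
            results.append(sweep_module.run(**vector, device=device))
    return results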