diff --git a/.github/workflows/ttnn-run-sweeps.yaml b/.github/workflows/ttnn-run-sweeps.yaml index aa83fe8f960..9949748f78d 100644 --- a/.github/workflows/ttnn-run-sweeps.yaml +++ b/.github/workflows/ttnn-run-sweeps.yaml @@ -234,24 +234,31 @@ on: - eltwise.binary.multiply.multiply_scalar_pytorch2 - eltwise.binary.div.div - eltwise.binary.div.div_tensor_pytorch2 + - eltwise.binary.div.div_forge - eltwise.binary.div_no_nan.div_no_nan - eltwise.binary.logical_or.logical_or_ - eltwise.binary.logical_or.logical_or - eltwise.binary.logical_or.logical_or_output + - eltwise.binary.logical_or.logical_or_forge - eltwise.binary.logical_xor.logical_xor_ - eltwise.binary.logical_xor.logical_xor - eltwise.binary.logical_and.logical_and_ - eltwise.binary.logical_and.logical_and - eltwise.binary.logical_and.logical_and_output + - eltwise.binary.logical_and.logical_and_forge - eltwise.binary.polyval.polyval - eltwise.binary.remainder.remainder + - eltwise.binary.remainder.remainder_scalar_pytorch2 + - eltwise.binary.remainder.remainder_forge - eltwise.binary.squared_difference.squared_difference - eltwise.binary.squared_difference_output.squared_difference_output - - eltwise.binary.remainder.remainder_scalar_pytorch2 - eltwise.binary.bcast.bcast_h_sharded - eltwise.binary.bcast.bcast - eltwise.binary.eq.eq_scalar_pytorch2 + - eltwise.binary.eq.eq_forge + - eltwise.binary.ge.ge_forge - eltwise.binary.gt.gt_scalar_pytorch2 + - eltwise.binary.gt.gt_forge - eltwise.binary.le.le_tensor_pytorch2 - eltwise.binary.fmod.fmod - eltwise.binary.floor_divide.floor_divide_pytorch2 @@ -260,7 +267,9 @@ on: - eltwise.binary.ldexp.ldexp - eltwise.binary.lt.lt_tensor_pytorch2 - eltwise.binary.lt.lt_scalar_pytorch2 + - eltwise.binary.lt.lt_forge - eltwise.binary.ne.ne_scalar_pytorch2 + - eltwise.binary.ne.ne_forge - eltwise.binary.hypot.hypot - eltwise.binary.xlogy.xlogy - eltwise.binary_backward.ldexp_bw.ldexp_bw @@ -293,6 +302,7 @@ on: - eltwise.ternary.lerp.lerp - eltwise.ternary.where.where - eltwise.ternary.where.where_pytorch2 + - eltwise.ternary.where.where_forge - eltwise.ternary_backward.addcmul_bw - eltwise.ternary_backward.addcdiv_bw - embedding.embedding diff --git a/tests/sweep_framework/sweeps/eltwise/binary/div/div_forge.py b/tests/sweep_framework/sweeps/eltwise/binary/div/div_forge.py new file mode 100644 index 00000000000..123fd0cbd57 --- /dev/null +++ b/tests/sweep_framework/sweeps/eltwise/binary/div/div_forge.py @@ -0,0 +1,265 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent Inc. + +# SPDX-License-Identifier: Apache-2.0 + +from typing import Optional, Tuple +from functools import partial + +import torch +import ttnn +from tests.tt_eager.python_api_testing.sweep_tests.generation_funcs import gen_func_with_cast_tt + +from tests.ttnn.utils_for_testing import check_with_pcc, start_measuring_time, stop_measuring_time +from models.utility_functions import torch_random + + +# Parameters provided to the test vector generator are defined here. +# They are defined as dict-type suites that contain the arguments to the run function as keys, and lists of possible inputs as values. +# Each suite has a key name (in this case "suite_1" and "suite_2") which will associate the test vectors to this specific suite of inputs. +# Developers can create their own generator functions and pass them to the parameters as inputs. 
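+# For illustration only (no such generator is used by this suite), a custom
+# generator passed in place of the static list below might look like:
+#
+#     def gen_shapes():
+#         for n in (32, 64, 128):
+#             yield {"self": [1, n], "other": [1, n], "input_dtype": "ttnn.float32"}
+#
+#     parameters = {"nightly": {"input_shape": list(gen_shapes()), ...}}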
+ + +parameters = { + "nightly": { + "input_shape": [ + {"self": [1, 12, 1, 1], "other": [1, 12, 1, 1], "input_dtype": "ttnn.float32"}, + {"self": [1, 16, 1, 1], "other": [1, 16, 1, 1], "input_dtype": "ttnn.float32"}, + {"self": [1, 6, 1, 1], "other": [1, 6, 1, 1], "input_dtype": "ttnn.float32"}, + {"self": [1, 8, 1, 1], "other": [1, 8, 1, 1], "input_dtype": "ttnn.float32"}, + {"self": [1], "other": [1], "input_dtype": "ttnn.float32"}, + {"self": [1, 1024, 640], "other": [1], "input_dtype": "ttnn.bfloat16"}, + {"self": [1, 1280, 16, 16], "other": [1], "input_dtype": "ttnn.bfloat16"}, + {"self": [1, 1280, 8, 8], "other": [1], "input_dtype": "ttnn.bfloat16"}, + {"self": [1, 12, 197, 197], "other": [1], "input_dtype": "ttnn.bfloat16"}, + {"self": [1, 12, 201, 201], "other": [1], "input_dtype": "ttnn.bfloat16"}, + {"self": [1, 12, 8, 8], "other": [1], "input_dtype": "ttnn.bfloat16"}, + {"self": [1, 16, 197, 197], "other": [1], "input_dtype": "ttnn.bfloat16"}, + {"self": [1, 1], "other": [1], "input_dtype": "ttnn.float32"}, + {"self": [1, 1, 16384, 256], "other": [1], "input_dtype": "ttnn.bfloat16"}, + {"self": [1, 1, 19200, 300], "other": [1], "input_dtype": "ttnn.bfloat16"}, + {"self": [1, 256, 1280], "other": [1], "input_dtype": "ttnn.bfloat16"}, + {"self": [1, 2, 4096, 256], "other": [1], "input_dtype": "ttnn.bfloat16"}, + {"self": [1, 2, 4800, 300], "other": [1], "input_dtype": "ttnn.bfloat16"}, + {"self": [1, 320, 64, 64], "other": [1], "input_dtype": "ttnn.bfloat16"}, + {"self": [1, 4096, 320], "other": [1], "input_dtype": "ttnn.bfloat16"}, + {"self": [1, 512], "other": [1, 1], "input_dtype": "ttnn.bfloat16"}, + {"self": [1, 5, 1024, 256], "other": [1], "input_dtype": "ttnn.bfloat16"}, + {"self": [1, 5, 1200, 300], "other": [1], "input_dtype": "ttnn.bfloat16"}, + {"self": [1, 640, 32, 32], "other": [1], "input_dtype": "ttnn.bfloat16"}, + {"self": [1, 64, 1280], "other": [1], "input_dtype": "ttnn.bfloat16"}, + {"self": [1, 8, 2048, 256], "other": [1], "input_dtype": "ttnn.bfloat16"}, + {"self": [1, 8, 256, 2048], "other": [1], "input_dtype": "ttnn.bfloat16"}, + {"self": [1, 8, 256, 256], "other": [1], "input_dtype": "ttnn.bfloat16"}, + {"self": [1, 8, 300, 300], "other": [1], "input_dtype": "ttnn.bfloat16"}, + {"self": [2, 512], "other": [2, 1], "input_dtype": "ttnn.bfloat16"}, + {"self": [1], "other": [1], "input_dtype": "ttnn.bfloat16"}, + {"self": [1, 1024, 512], "other": [1, 1024, 512], "input_dtype": "ttnn.float32"}, + {"self": [1, 1024, 640], "other": [1, 1024, 640], "input_dtype": "ttnn.float32"}, + {"self": [1, 10, 3072], "other": [1, 10, 3072], "input_dtype": "ttnn.float32"}, + {"self": [1, 10, 768], "other": [1, 10, 768], "input_dtype": "ttnn.float32"}, + {"self": [1, 1200, 1280], "other": [1, 1200, 1280], "input_dtype": "ttnn.float32"}, + {"self": [1, 1445, 768], "other": [1, 1445, 768], "input_dtype": "ttnn.float32"}, + {"self": [1, 1536], "other": [1, 1536], "input_dtype": "ttnn.float32"}, + {"self": [1, 16384, 128], "other": [1, 16384, 128], "input_dtype": "ttnn.float32"}, + {"self": [1, 16, 3072], "other": [1, 16, 3072], "input_dtype": "ttnn.float32"}, + {"self": [1, 19200, 256], "other": [1, 19200, 256], "input_dtype": "ttnn.float32"}, + {"self": [1, 197, 3072], "other": [1, 197, 3072], "input_dtype": "ttnn.float32"}, + {"self": [1, 197, 4096], "other": [1, 197, 4096], "input_dtype": "ttnn.float32"}, + {"self": [1, 19, 4096], "other": [1, 19, 4096], "input_dtype": "ttnn.float32"}, + {"self": [1, 201, 3072], "other": [1, 201, 3072], "input_dtype": "ttnn.float32"}, + 
{"self": [1, 2048, 768], "other": [1, 2048, 768], "input_dtype": "ttnn.float32"}, + {"self": [1, 256, 1024], "other": [1, 256, 1024], "input_dtype": "ttnn.float32"}, + {"self": [1, 256, 1280], "other": [1, 256, 1280], "input_dtype": "ttnn.float32"}, + {"self": [1, 256, 256], "other": [1, 256, 256], "input_dtype": "ttnn.float32"}, + {"self": [1, 256, 4096], "other": [1, 256, 4096], "input_dtype": "ttnn.float32"}, + {"self": [1, 256, 5120], "other": [1, 256, 5120], "input_dtype": "ttnn.float32"}, + {"self": [1, 25, 3072], "other": [1, 25, 3072], "input_dtype": "ttnn.float32"}, + {"self": [1, 300, 2048], "other": [1, 300, 2048], "input_dtype": "ttnn.float32"}, + {"self": [1, 3072, 8], "other": [1, 3072, 8], "input_dtype": "ttnn.float32"}, + {"self": [1, 4096, 1280], "other": [1, 4096, 1280], "input_dtype": "ttnn.float32"}, + {"self": [1, 4096, 256], "other": [1, 4096, 256], "input_dtype": "ttnn.float32"}, + {"self": [1, 4800, 512], "other": [1, 4800, 512], "input_dtype": "ttnn.float32"}, + {"self": [1, 64, 5120], "other": [1, 64, 5120], "input_dtype": "ttnn.float32"}, + {"self": [1, 7, 18176], "other": [1, 7, 18176], "input_dtype": "ttnn.float32"}, + {"self": [1, 1024], "other": [1], "input_dtype": "ttnn.bfloat16"}, + {"self": [1, 768], "other": [1], "input_dtype": "ttnn.bfloat16"}, + {"self": [1, 1, 1], "other": [1], "input_dtype": "ttnn.float32"}, + {"self": [1, 512], "other": [1], "input_dtype": "ttnn.bfloat16"}, + {"self": [112], "other": [112], "input_dtype": "ttnn.float32"}, + {"self": [116], "other": [116], "input_dtype": "ttnn.float32"}, + {"self": [120], "other": [120], "input_dtype": "ttnn.float32"}, + {"self": [128], "other": [128], "input_dtype": "ttnn.float32"}, + {"self": [1280], "other": [1280], "input_dtype": "ttnn.float32"}, + {"self": [134], "other": [134], "input_dtype": "ttnn.float32"}, + {"self": [14], "other": [14], "input_dtype": "ttnn.float32"}, + {"self": [144], "other": [144], "input_dtype": "ttnn.float32"}, + {"self": [16], "other": [16], "input_dtype": "ttnn.float32"}, + {"self": [160], "other": [160], "input_dtype": "ttnn.float32"}, + {"self": [168], "other": [168], "input_dtype": "ttnn.float32"}, + {"self": [184], "other": [184], "input_dtype": "ttnn.float32"}, + {"self": [192], "other": [192], "input_dtype": "ttnn.float32"}, + {"self": [196], "other": [196], "input_dtype": "ttnn.float32"}, + {"self": [20], "other": [20], "input_dtype": "ttnn.float32"}, + {"self": [200], "other": [200], "input_dtype": "ttnn.float32"}, + {"self": [2048], "other": [2048], "input_dtype": "ttnn.float32"}, + {"self": [24], "other": [24], "input_dtype": "ttnn.float32"}, + {"self": [240], "other": [240], "input_dtype": "ttnn.float32"}, + {"self": [256], "other": [256], "input_dtype": "ttnn.float32"}, + {"self": [272], "other": [272], "input_dtype": "ttnn.float32"}, + {"self": [28], "other": [28], "input_dtype": "ttnn.float32"}, + {"self": [32], "other": [32], "input_dtype": "ttnn.float32"}, + {"self": [320], "other": [320], "input_dtype": "ttnn.float32"}, + {"self": [334], "other": [334], "input_dtype": "ttnn.float32"}, + {"self": [34], "other": [34], "input_dtype": "ttnn.float32"}, + {"self": [384], "other": [384], "input_dtype": "ttnn.float32"}, + {"self": [40], "other": [40], "input_dtype": "ttnn.float32"}, + {"self": [46], "other": [46], "input_dtype": "ttnn.float32"}, + {"self": [462], "other": [462], "input_dtype": "ttnn.float32"}, + {"self": [480], "other": [480], "input_dtype": "ttnn.float32"}, + {"self": [512], "other": [512], "input_dtype": "ttnn.float32"}, + {"self": [576], 
"other": [576], "input_dtype": "ttnn.float32"}, + {"self": [58], "other": [58], "input_dtype": "ttnn.float32"}, + {"self": [64], "other": [64], "input_dtype": "ttnn.float32"}, + {"self": [640], "other": [640], "input_dtype": "ttnn.float32"}, + {"self": [672], "other": [672], "input_dtype": "ttnn.float32"}, + {"self": [68], "other": [68], "input_dtype": "ttnn.float32"}, + {"self": [72], "other": [72], "input_dtype": "ttnn.float32"}, + {"self": [78], "other": [78], "input_dtype": "ttnn.float32"}, + {"self": [80], "other": [80], "input_dtype": "ttnn.float32"}, + {"self": [96], "other": [96], "input_dtype": "ttnn.float32"}, + {"self": [960], "other": [960], "input_dtype": "ttnn.float32"}, + {"self": [98], "other": [98], "input_dtype": "ttnn.float32"}, + # {"self": [1, 12, 10, 10], "other": [1, 12, 10, 1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 12, 197, 197], "other": [1, 12, 197, 1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 12, 1, 10], "other": [1, 12, 1, 1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 12, 201, 201], "other": [1, 12, 201, 1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 12, 8, 8], "other": [1, 12, 8, 1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 16, 10, 10], "other": [1, 16, 10, 1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 16, 197, 197], "other": [1, 16, 197, 1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 16, 1, 10], "other": [1, 16, 1, 1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 16, 32, 32], "other": [1, 16, 32, 1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 16, 5, 5], "other": [1, 16, 5, 1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 1, 16384, 256], "other": [1, 1, 16384, 1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 1, 19200, 300], "other": [1, 1, 19200, 1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 2, 4096, 256], "other": [1, 2, 4096, 1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 2, 4800, 300], "other": [1, 2, 4800, 1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 5, 1024, 256], "other": [1, 5, 1024, 1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 5, 1200, 300], "other": [1, 5, 1200, 1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 6, 15, 15], "other": [1, 6, 15, 1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 6, 1, 15], "other": [1, 6, 1, 1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 8, 10, 10], "other": [1, 8, 10, 1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 8, 1, 10], "other": [1, 8, 1, 1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 8, 2048, 256], "other": [1, 8, 2048, 1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 8, 256, 2048], "other": [1, 8, 256, 1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 8, 256, 256], "other": [1, 8, 256, 1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 8, 300, 300], "other": [1, 8, 300, 1], "input_dtype": "ttnn.float32"}, + # {"self": [8, 100, 100], "other": [8, 100, 1], "input_dtype": "ttnn.float32"}, + # {"self": [8, 100, 920], "other": [8, 100, 1], "input_dtype": "ttnn.float32"}, + # {"self": [8, 920, 920], "other": [8, 920, 1], "input_dtype": "ttnn.float32"}, + # {"self": [10, 10], "other": [1], "input_dtype": "ttnn.float32"}, + # {"self": [128], "other": [1], "input_dtype": "ttnn.float32"}, + # {"self": [15, 15], "other": [1], "input_dtype": "ttnn.float32"}, + # {"self": [160], "other": [1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 16, 5, 5], "other": [1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 23, 40], "other": [1, 1, 40], "input_dtype": "ttnn.float32"}, + # {"self": [1, 23, 
40], "other": [1, 23, 1], "input_dtype": "ttnn.float32"}, + # {"self": [20], "other": [1], "input_dtype": "ttnn.float32"}, + # {"self": [2], "other": [1], "input_dtype": "ttnn.float32"}, + # {"self": [3234, 1], "other": [1], "input_dtype": "ttnn.float32"}, + # {"self": [3], "other": [1], "input_dtype": "ttnn.float32"}, + # {"self": [3, 320, 320], "other": [3, 1, 1], "input_dtype": "ttnn.float32"}, + # {"self": [5], "other": [1], "input_dtype": "ttnn.float32"}, + # {"self": [197], "other": [1], "input_dtype": "ttnn.float32"}, + # {"self": [19], "other": [1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 480, 1, 1], "other": [1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 672, 1, 1], "other": [1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 72, 1, 1], "other": [1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 184, 20, 20], "other": [1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 200, 20, 20], "other": [1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 240, 20, 20], "other": [1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 240, 40, 40], "other": [1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 480, 10, 10], "other": [1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 480, 20, 20], "other": [1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 672, 10, 10], "other": [1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 672, 20, 20], "other": [1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 10, 1], "other": [1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 120, 1, 1], "other": [1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 1280, 1, 1], "other": [1], "input_dtype": "ttnn.bfloat16"}, + # {"self": [1, 15, 1], "other": [1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 2048, 1, 1], "other": [1], "input_dtype": "ttnn.bfloat16"}, + # {"self": [1, 32, 1], "other": [1], "input_dtype": "ttnn.float32"}, + # {"self": [1, 512, 1, 1], "other": [1], "input_dtype": "ttnn.bfloat16"}, + ], + "input_a_layout": [ttnn.TILE_LAYOUT], + "input_b_layout": [ttnn.TILE_LAYOUT], + "input_a_memory_config": [ttnn.DRAM_MEMORY_CONFIG], + "input_b_memory_config": [ttnn.DRAM_MEMORY_CONFIG], + "output_memory_config": [ttnn.DRAM_MEMORY_CONFIG], + }, +} + + +# Invalidate vector is called during the generation phase where each vector will be passed in. +# If invalidated, the vector will still be stored but will be skipped. +# Returns False, None if the vector is valid, and True, str with a reason for invalidation if it is invalid. +def invalidate_vector(test_vector) -> Tuple[bool, Optional[str]]: + if test_vector["input_a_layout"] == ttnn.ROW_MAJOR_LAYOUT or test_vector["input_b_layout"] == ttnn.ROW_MAJOR_LAYOUT: + return True, "Row Major layout is not supported" + return False, None + + +# This is the run instructions for the test, defined by the developer. +# The run function must take the above-defined parameters as inputs. +# The runner will call this run function with each test vector, and the returned results from this function will be stored. +# If you defined a mesh_device_fixture above, the object you yielded will be passed into this function as 'device'. Otherwise, it will be the default ttnn device opened by the infra. 
+def run( + input_shape, + input_a_layout, + input_b_layout, + input_a_memory_config, + input_b_memory_config, + output_memory_config, + *, + device, +) -> list: + torch.manual_seed(0) + if input_shape["input_dtype"] == "ttnn.bfloat16": + input_dtype = ttnn.bfloat16 + elif input_shape["input_dtype"] == "ttnn.float32": + input_dtype = ttnn.float32 + elif input_shape["input_dtype"] == "ttnn.int32": + input_dtype = ttnn.int32 + + torch_input_tensor_a = gen_func_with_cast_tt( + partial(torch_random, low=-100, high=100, dtype=torch.float32), input_dtype + )(input_shape["self"]) + torch_input_tensor_b = gen_func_with_cast_tt( + partial(torch_random, low=-100, high=100, dtype=torch.float32), input_dtype + )(input_shape["other"]) + + golden_function = ttnn.get_golden_function(ttnn.div) + torch_output_tensor = golden_function(torch_input_tensor_a, torch_input_tensor_b) + + input_tensor_a = ttnn.from_torch( + torch_input_tensor_a, + dtype=input_dtype, + layout=input_a_layout, + device=device, + memory_config=input_a_memory_config, + ) + + input_tensor_b = ttnn.from_torch( + torch_input_tensor_b, + dtype=input_dtype, + layout=input_b_layout, + device=device, + memory_config=input_b_memory_config, + ) + + start_time = start_measuring_time() + result = ttnn.div(input_tensor_a, input_tensor_b, memory_config=output_memory_config) + # ToDo: Update it once the tensor layout support with rank < 2 is supported in mid of Jan + output_tensor = ttnn.to_torch(result, torch_rank=len(input_shape["self"])) + e2e_perf = stop_measuring_time(start_time) + + return [check_with_pcc(torch_output_tensor, output_tensor, 0.999), e2e_perf] diff --git a/tests/sweep_framework/sweeps/eltwise/binary/eq/eq_forge.py b/tests/sweep_framework/sweeps/eltwise/binary/eq/eq_forge.py new file mode 100644 index 00000000000..7a65df7c24f --- /dev/null +++ b/tests/sweep_framework/sweeps/eltwise/binary/eq/eq_forge.py @@ -0,0 +1,104 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent Inc. + +# SPDX-License-Identifier: Apache-2.0 + +from typing import Optional, Tuple +from functools import partial + +import torch +import ttnn +from tests.tt_eager.python_api_testing.sweep_tests.generation_funcs import gen_func_with_cast_tt + +from tests.ttnn.utils_for_testing import check_with_pcc, start_measuring_time, stop_measuring_time +from models.utility_functions import torch_random + + +# Parameters provided to the test vector generator are defined here. +# They are defined as dict-type suites that contain the arguments to the run function as keys, and lists of possible inputs as values. +# Each suite has a key name (in this case "suite_1" and "suite_2") which will associate the test vectors to this specific suite of inputs. +# Developers can create their own generator functions and pass them to the parameters as inputs. 
+ + +parameters = { + "xfail": { + "input_shape": [ + {"self": [1, 1, 5, 5], "other": [1], "input_dtype": "ttnn.bfloat16"}, + {"self": [1, 1, 7, 7], "other": [1], "input_dtype": "ttnn.bfloat16"}, + {"self": [1, 45], "other": [1, 45], "input_dtype": "ttnn.int32"}, + {"self": [1, 5], "other": [1, 5], "input_dtype": "ttnn.int32"}, + {"self": [1, 6], "other": [1, 6], "input_dtype": "ttnn.int32"}, + {"self": [1, 7], "other": [1, 7], "input_dtype": "ttnn.int32"}, + ], + "input_a_layout": [ttnn.TILE_LAYOUT], + "input_b_layout": [ttnn.TILE_LAYOUT], + "input_a_memory_config": [ttnn.DRAM_MEMORY_CONFIG], + "input_b_memory_config": [ttnn.DRAM_MEMORY_CONFIG], + "output_memory_config": [ttnn.DRAM_MEMORY_CONFIG], + }, +} + + +# Invalidate vector is called during the generation phase where each vector will be passed in. +# If invalidated, the vector will still be stored but will be skipped. +# Returns False, None if the vector is valid, and True, str with a reason for invalidation if it is invalid. +def invalidate_vector(test_vector) -> Tuple[bool, Optional[str]]: + if test_vector["input_a_layout"] == ttnn.ROW_MAJOR_LAYOUT or test_vector["input_b_layout"] == ttnn.ROW_MAJOR_LAYOUT: + return True, "Row Major layout is not supported" + return False, None + + +# This is the run instructions for the test, defined by the developer. +# The run function must take the above-defined parameters as inputs. +# The runner will call this run function with each test vector, and the returned results from this function will be stored. +# If you defined a mesh_device_fixture above, the object you yielded will be passed into this function as 'device'. Otherwise, it will be the default ttnn device opened by the infra. +def run( + input_shape, + input_a_layout, + input_b_layout, + input_a_memory_config, + input_b_memory_config, + output_memory_config, + *, + device, +) -> list: + torch.manual_seed(0) + if input_shape["input_dtype"] == "ttnn.bfloat16": + input_dtype = ttnn.bfloat16 + elif input_shape["input_dtype"] == "ttnn.float32": + input_dtype = ttnn.float32 + elif input_shape["input_dtype"] == "ttnn.int32": + input_dtype = ttnn.int32 + + torch_input_tensor_a = gen_func_with_cast_tt( + partial(torch_random, low=-100, high=100, dtype=torch.float32), input_dtype + )(input_shape["self"]) + torch_input_tensor_b = gen_func_with_cast_tt( + partial(torch_random, low=-100, high=100, dtype=torch.float32), input_dtype + )(input_shape["other"]) + + golden_function = ttnn.get_golden_function(ttnn.eq) + torch_output_tensor = golden_function(torch_input_tensor_a, torch_input_tensor_b) + + input_tensor_a = ttnn.from_torch( + torch_input_tensor_a, + dtype=input_dtype, + layout=input_a_layout, + device=device, + memory_config=input_a_memory_config, + ) + + input_tensor_b = ttnn.from_torch( + torch_input_tensor_b, + dtype=input_dtype, + layout=input_b_layout, + device=device, + memory_config=input_b_memory_config, + ) + + start_time = start_measuring_time() + result = ttnn.eq(input_tensor_a, input_tensor_b, memory_config=output_memory_config) + # ToDo: Update it once the tensor layout support with rank < 2 is supported in mid of Jan + output_tensor = ttnn.to_torch(result, torch_rank=len(input_shape["self"])) + e2e_perf = stop_measuring_time(start_time) + + return [check_with_pcc(torch_output_tensor, output_tensor, 0.999), e2e_perf] diff --git a/tests/sweep_framework/sweeps/eltwise/binary/ge/ge_forge.py b/tests/sweep_framework/sweeps/eltwise/binary/ge/ge_forge.py new file mode 100644 index 00000000000..c252eeaec31 --- /dev/null +++ 
b/tests/sweep_framework/sweeps/eltwise/binary/ge/ge_forge.py @@ -0,0 +1,103 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent Inc. + +# SPDX-License-Identifier: Apache-2.0 + +from typing import Optional, Tuple +from functools import partial + +import torch +import ttnn +from tests.tt_eager.python_api_testing.sweep_tests.generation_funcs import gen_func_with_cast_tt + +from tests.ttnn.utils_for_testing import check_with_pcc, start_measuring_time, stop_measuring_time +from models.utility_functions import torch_random + + +# Parameters provided to the test vector generator are defined here. +# They are defined as dict-type suites that contain the arguments to the run function as keys, and lists of possible inputs as values. +# Each suite has a key name (in this case "suite_1" and "suite_2") which will associate the test vectors to this specific suite of inputs. +# Developers can create their own generator functions and pass them to the parameters as inputs. + + +parameters = { + "xfail": { + "input_shape": [[1]], + "input_a_dtype": [ttnn.int32], + "input_b_dtype": [ttnn.int32], + "input_a_layout": [ttnn.TILE_LAYOUT], + "input_b_layout": [ttnn.TILE_LAYOUT], + "input_a_memory_config": [ttnn.DRAM_MEMORY_CONFIG], + "input_b_memory_config": [ttnn.DRAM_MEMORY_CONFIG], + "output_memory_config": [ttnn.DRAM_MEMORY_CONFIG], + }, +} + + +def mesh_device_fixture(): + device = ttnn.open_device(device_id=0) + assert ttnn.device.is_wormhole_b0(device), "This op is available for Wormhole_B0 only" + yield (device, "Wormhole_B0") + ttnn.close_device(device) + del device + + +# Invalidate vector is called during the generation phase where each vector will be passed in. +# If invalidated, the vector will still be stored but will be skipped. +# Returns False, None if the vector is valid, and True, str with a reason for invalidation if it is invalid. +def invalidate_vector(test_vector) -> Tuple[bool, Optional[str]]: + if test_vector["input_a_layout"] == ttnn.ROW_MAJOR_LAYOUT or test_vector["input_b_layout"] == ttnn.ROW_MAJOR_LAYOUT: + return True, "Row Major layout is not supported" + return False, None + + +# This is the run instructions for the test, defined by the developer. +# The run function must take the above-defined parameters as inputs. +# The runner will call this run function with each test vector, and the returned results from this function will be stored. +# If you defined a mesh_device_fixture above, the object you yielded will be passed into this function as 'device'. Otherwise, it will be the default ttnn device opened by the infra. 
+def run( + input_shape, + input_a_dtype, + input_b_dtype, + input_a_layout, + input_b_layout, + input_a_memory_config, + input_b_memory_config, + output_memory_config, + *, + device, +) -> list: + torch.manual_seed(0) + + torch_input_tensor_a = gen_func_with_cast_tt( + partial(torch_random, low=-100, high=100, dtype=torch.int32), input_a_dtype + )(input_shape) + torch_input_tensor_b = gen_func_with_cast_tt( + partial(torch_random, low=-100, high=100, dtype=torch.int32), input_b_dtype + )(input_shape) + + golden_function = ttnn.get_golden_function(ttnn.ge) + torch_output_tensor = golden_function(torch_input_tensor_a, torch_input_tensor_b) + + input_tensor_a = ttnn.from_torch( + torch_input_tensor_a, + dtype=input_a_dtype, + layout=input_a_layout, + device=device, + memory_config=input_a_memory_config, + ) + + input_tensor_b = ttnn.from_torch( + torch_input_tensor_b, + dtype=input_b_dtype, + layout=input_b_layout, + device=device, + memory_config=input_b_memory_config, + ) + + start_time = start_measuring_time() + result = ttnn.ge(input_tensor_a, input_tensor_b, memory_config=output_memory_config) + # ToDo: Update it once the tensor layout support with rank < 2 is supported in mid of Jan + output_tensor = ttnn.to_torch(result, torch_rank=len(input_shape)) + e2e_perf = stop_measuring_time(start_time) + + return [check_with_pcc(torch_output_tensor, output_tensor, 0.999), e2e_perf] diff --git a/tests/sweep_framework/sweeps/eltwise/binary/gt/gt_forge.py b/tests/sweep_framework/sweeps/eltwise/binary/gt/gt_forge.py new file mode 100644 index 00000000000..b76398e0c60 --- /dev/null +++ b/tests/sweep_framework/sweeps/eltwise/binary/gt/gt_forge.py @@ -0,0 +1,102 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent Inc. + +# SPDX-License-Identifier: Apache-2.0 + +from typing import Optional, Tuple +from functools import partial + +import torch +import ttnn +from tests.tt_eager.python_api_testing.sweep_tests.generation_funcs import gen_func_with_cast_tt + +from tests.ttnn.utils_for_testing import check_with_pcc, start_measuring_time, stop_measuring_time +from models.utility_functions import torch_random + + +# Parameters provided to the test vector generator are defined here. +# They are defined as dict-type suites that contain the arguments to the run function as keys, and lists of possible inputs as values. +# Each suite has a key name (in this case "suite_1" and "suite_2") which will associate the test vectors to this specific suite of inputs. +# Developers can create their own generator functions and pass them to the parameters as inputs. + + +parameters = { + "xfail": { + "input_shape": [[15, 15]], + "input_a_dtype": [ttnn.int32], + "input_b_dtype": [ttnn.int32], + "input_a_layout": [ttnn.TILE_LAYOUT], + "input_b_layout": [ttnn.TILE_LAYOUT], + "input_a_memory_config": [ttnn.DRAM_MEMORY_CONFIG], + "input_b_memory_config": [ttnn.DRAM_MEMORY_CONFIG], + "output_memory_config": [ttnn.DRAM_MEMORY_CONFIG], + }, +} + + +def mesh_device_fixture(): + device = ttnn.open_device(device_id=0) + assert ttnn.device.is_wormhole_b0(device), "This op is available for Wormhole_B0 only" + yield (device, "Wormhole_B0") + ttnn.close_device(device) + del device + + +# Invalidate vector is called during the generation phase where each vector will be passed in. +# If invalidated, the vector will still be stored but will be skipped. +# Returns False, None if the vector is valid, and True, str with a reason for invalidation if it is invalid. 
+def invalidate_vector(test_vector) -> Tuple[bool, Optional[str]]: + if test_vector["input_a_layout"] == ttnn.ROW_MAJOR_LAYOUT or test_vector["input_b_layout"] == ttnn.ROW_MAJOR_LAYOUT: + return True, "Row Major layout is not supported" + return False, None + + +# This is the run instructions for the test, defined by the developer. +# The run function must take the above-defined parameters as inputs. +# The runner will call this run function with each test vector, and the returned results from this function will be stored. +# If you defined a mesh_device_fixture above, the object you yielded will be passed into this function as 'device'. Otherwise, it will be the default ttnn device opened by the infra. +def run( + input_shape, + input_a_dtype, + input_b_dtype, + input_a_layout, + input_b_layout, + input_a_memory_config, + input_b_memory_config, + output_memory_config, + *, + device, +) -> list: + torch.manual_seed(0) + + torch_input_tensor_a = gen_func_with_cast_tt( + partial(torch_random, low=-100, high=100, dtype=torch.int32), input_a_dtype + )(input_shape) + torch_input_tensor_b = gen_func_with_cast_tt( + partial(torch_random, low=-100, high=100, dtype=torch.int32), input_b_dtype + )(input_shape) + + golden_function = ttnn.get_golden_function(ttnn.gt) + torch_output_tensor = golden_function(torch_input_tensor_a, torch_input_tensor_b) + + input_tensor_a = ttnn.from_torch( + torch_input_tensor_a, + dtype=input_a_dtype, + layout=input_a_layout, + device=device, + memory_config=input_a_memory_config, + ) + + input_tensor_b = ttnn.from_torch( + torch_input_tensor_b, + dtype=input_b_dtype, + layout=input_b_layout, + device=device, + memory_config=input_b_memory_config, + ) + + start_time = start_measuring_time() + result = ttnn.gt(input_tensor_a, input_tensor_b, memory_config=output_memory_config) + output_tensor = ttnn.to_torch(result) + e2e_perf = stop_measuring_time(start_time) + + return [check_with_pcc(torch_output_tensor, output_tensor, 0.999), e2e_perf] diff --git a/tests/sweep_framework/sweeps/eltwise/binary/logical_and/logical_and_forge.py b/tests/sweep_framework/sweeps/eltwise/binary/logical_and/logical_and_forge.py new file mode 100644 index 00000000000..c7bb05c291c --- /dev/null +++ b/tests/sweep_framework/sweeps/eltwise/binary/logical_and/logical_and_forge.py @@ -0,0 +1,103 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent Inc. + +# SPDX-License-Identifier: Apache-2.0 + +from typing import Optional, Tuple +from functools import partial + +import torch +import ttnn +from tests.tt_eager.python_api_testing.sweep_tests.generation_funcs import gen_func_with_cast_tt + +from tests.ttnn.utils_for_testing import check_with_pcc, start_measuring_time, stop_measuring_time +from models.utility_functions import torch_random + + +# Parameters provided to the test vector generator are defined here. +# They are defined as dict-type suites that contain the arguments to the run function as keys, and lists of possible inputs as values. +# Each suite has a key name (in this case "suite_1" and "suite_2") which will associate the test vectors to this specific suite of inputs. +# Developers can create their own generator functions and pass them to the parameters as inputs. 
+ + +parameters = { + "nightly": { + "input_shape": [[19]], + "input_a_dtype": [ttnn.int32], + "input_b_dtype": [ttnn.int32], + "input_a_layout": [ttnn.TILE_LAYOUT], + "input_b_layout": [ttnn.TILE_LAYOUT], + "input_a_memory_config": [ttnn.DRAM_MEMORY_CONFIG], + "input_b_memory_config": [ttnn.DRAM_MEMORY_CONFIG], + "output_memory_config": [ttnn.DRAM_MEMORY_CONFIG], + }, +} + + +def mesh_device_fixture(): + device = ttnn.open_device(device_id=0) + assert ttnn.device.is_wormhole_b0(device), "This op is available for Wormhole_B0 only" + yield (device, "Wormhole_B0") + ttnn.close_device(device) + del device + + +# Invalidate vector is called during the generation phase where each vector will be passed in. +# If invalidated, the vector will still be stored but will be skipped. +# Returns False, None if the vector is valid, and True, str with a reason for invalidation if it is invalid. +def invalidate_vector(test_vector) -> Tuple[bool, Optional[str]]: + if test_vector["input_a_layout"] == ttnn.ROW_MAJOR_LAYOUT or test_vector["input_b_layout"] == ttnn.ROW_MAJOR_LAYOUT: + return True, "Row Major layout is not supported" + return False, None + + +# This is the run instructions for the test, defined by the developer. +# The run function must take the above-defined parameters as inputs. +# The runner will call this run function with each test vector, and the returned results from this function will be stored. +# If you defined a mesh_device_fixture above, the object you yielded will be passed into this function as 'device'. Otherwise, it will be the default ttnn device opened by the infra. +def run( + input_shape, + input_a_dtype, + input_b_dtype, + input_a_layout, + input_b_layout, + input_a_memory_config, + input_b_memory_config, + output_memory_config, + *, + device, +) -> list: + torch.manual_seed(0) + + torch_input_tensor_a = gen_func_with_cast_tt( + partial(torch_random, low=-100, high=100, dtype=torch.int32), input_a_dtype + )(input_shape) + torch_input_tensor_b = gen_func_with_cast_tt( + partial(torch_random, low=-100, high=100, dtype=torch.int32), input_b_dtype + )(input_shape) + + golden_function = ttnn.get_golden_function(ttnn.logical_and) + torch_output_tensor = golden_function(torch_input_tensor_a, torch_input_tensor_b) + + input_tensor_a = ttnn.from_torch( + torch_input_tensor_a, + dtype=input_a_dtype, + layout=input_a_layout, + device=device, + memory_config=input_a_memory_config, + ) + + input_tensor_b = ttnn.from_torch( + torch_input_tensor_b, + dtype=input_b_dtype, + layout=input_b_layout, + device=device, + memory_config=input_b_memory_config, + ) + + start_time = start_measuring_time() + result = ttnn.logical_and(input_tensor_a, input_tensor_b, memory_config=output_memory_config) + # ToDo: Update it once the tensor layout support with rank < 2 is supported in mid of Jan + output_tensor = ttnn.to_torch(result, torch_rank=len(input_shape)) + e2e_perf = stop_measuring_time(start_time) + + return [check_with_pcc(torch_output_tensor, output_tensor, 0.999), e2e_perf] diff --git a/tests/sweep_framework/sweeps/eltwise/binary/logical_or/logical_or_forge.py b/tests/sweep_framework/sweeps/eltwise/binary/logical_or/logical_or_forge.py new file mode 100644 index 00000000000..32c0926075e --- /dev/null +++ b/tests/sweep_framework/sweeps/eltwise/binary/logical_or/logical_or_forge.py @@ -0,0 +1,95 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent Inc. 
+ +# SPDX-License-Identifier: Apache-2.0 + +from typing import Optional, Tuple +from functools import partial + +import torch +import ttnn +from tests.tt_eager.python_api_testing.sweep_tests.generation_funcs import gen_func_with_cast_tt + +from tests.ttnn.utils_for_testing import check_with_pcc, start_measuring_time, stop_measuring_time +from models.utility_functions import torch_random + + +# Parameters provided to the test vector generator are defined here. +# They are defined as dict-type suites that contain the arguments to the run function as keys, and lists of possible inputs as values. +# Each suite has a key name (in this case "suite_1" and "suite_2") which will associate the test vectors to this specific suite of inputs. +# Developers can create their own generator functions and pass them to the parameters as inputs. + + +parameters = { + "nightly": { + "input_shape": [[1]], + "input_a_dtype": [ttnn.bfloat16], + "input_b_dtype": [ttnn.bfloat16], + "input_a_layout": [ttnn.TILE_LAYOUT], + "input_b_layout": [ttnn.TILE_LAYOUT], + "input_a_memory_config": [ttnn.DRAM_MEMORY_CONFIG], + "input_b_memory_config": [ttnn.DRAM_MEMORY_CONFIG], + "output_memory_config": [ttnn.DRAM_MEMORY_CONFIG], + }, +} + + +# Invalidate vector is called during the generation phase where each vector will be passed in. +# If invalidated, the vector will still be stored but will be skipped. +# Returns False, None if the vector is valid, and True, str with a reason for invalidation if it is invalid. +def invalidate_vector(test_vector) -> Tuple[bool, Optional[str]]: + if test_vector["input_a_layout"] == ttnn.ROW_MAJOR_LAYOUT or test_vector["input_b_layout"] == ttnn.ROW_MAJOR_LAYOUT: + return True, "Row Major layout is not supported" + return False, None + + +# This is the run instructions for the test, defined by the developer. +# The run function must take the above-defined parameters as inputs. +# The runner will call this run function with each test vector, and the returned results from this function will be stored. +# If you defined a mesh_device_fixture above, the object you yielded will be passed into this function as 'device'. Otherwise, it will be the default ttnn device opened by the infra. 
+def run( + input_shape, + input_a_dtype, + input_b_dtype, + input_a_layout, + input_b_layout, + input_a_memory_config, + input_b_memory_config, + output_memory_config, + *, + device, +) -> list: + torch.manual_seed(0) + + torch_input_tensor_a = gen_func_with_cast_tt( + partial(torch_random, low=-100, high=100, dtype=torch.bfloat16), input_a_dtype + )(input_shape) + torch_input_tensor_b = gen_func_with_cast_tt( + partial(torch_random, low=-100, high=100, dtype=torch.bfloat16), input_b_dtype + )(input_shape) + + golden_function = ttnn.get_golden_function(ttnn.logical_or) + torch_output_tensor = golden_function(torch_input_tensor_a, torch_input_tensor_b) + + input_tensor_a = ttnn.from_torch( + torch_input_tensor_a, + dtype=input_a_dtype, + layout=input_a_layout, + device=device, + memory_config=input_a_memory_config, + ) + + input_tensor_b = ttnn.from_torch( + torch_input_tensor_b, + dtype=input_b_dtype, + layout=input_b_layout, + device=device, + memory_config=input_b_memory_config, + ) + + start_time = start_measuring_time() + result = ttnn.logical_or(input_tensor_a, input_tensor_b, memory_config=output_memory_config) + # ToDo: Update it once the tensor layout support with rank < 2 is supported in mid of Jan + output_tensor = ttnn.to_torch(result, torch_rank=len(input_shape)) + e2e_perf = stop_measuring_time(start_time) + + return [check_with_pcc(torch_output_tensor, output_tensor, 0.999), e2e_perf] diff --git a/tests/sweep_framework/sweeps/eltwise/binary/lt/lt_forge.py b/tests/sweep_framework/sweeps/eltwise/binary/lt/lt_forge.py new file mode 100644 index 00000000000..a3e43f29561 --- /dev/null +++ b/tests/sweep_framework/sweeps/eltwise/binary/lt/lt_forge.py @@ -0,0 +1,103 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent Inc. + +# SPDX-License-Identifier: Apache-2.0 + +from typing import Optional, Tuple +from functools import partial + +import torch +import ttnn +from tests.tt_eager.python_api_testing.sweep_tests.generation_funcs import gen_func_with_cast_tt + +from tests.ttnn.utils_for_testing import check_with_pcc, start_measuring_time, stop_measuring_time +from models.utility_functions import torch_random + + +# Parameters provided to the test vector generator are defined here. +# They are defined as dict-type suites that contain the arguments to the run function as keys, and lists of possible inputs as values. +# Each suite has a key name (in this case "suite_1" and "suite_2") which will associate the test vectors to this specific suite of inputs. +# Developers can create their own generator functions and pass them to the parameters as inputs. + + +parameters = { + "xfail": { + "input_shape": [[15, 15], [19], [1, 1]], + "input_a_dtype": [ttnn.int32], + "input_b_dtype": [ttnn.int32], + "input_a_layout": [ttnn.TILE_LAYOUT], + "input_b_layout": [ttnn.TILE_LAYOUT], + "input_a_memory_config": [ttnn.DRAM_MEMORY_CONFIG], + "input_b_memory_config": [ttnn.DRAM_MEMORY_CONFIG], + "output_memory_config": [ttnn.DRAM_MEMORY_CONFIG], + }, +} + + +def mesh_device_fixture(): + device = ttnn.open_device(device_id=0) + assert ttnn.device.is_wormhole_b0(device), "This op is available for Wormhole_B0 only" + yield (device, "Wormhole_B0") + ttnn.close_device(device) + del device + + +# Invalidate vector is called during the generation phase where each vector will be passed in. +# If invalidated, the vector will still be stored but will be skipped. +# Returns False, None if the vector is valid, and True, str with a reason for invalidation if it is invalid. 
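+# As an illustration only (this suite applies no such rule), an additional
+# restriction would follow the same return-value pattern:
+#
+#     if test_vector["input_a_dtype"] == ttnn.int32 and test_vector["input_b_dtype"] != ttnn.int32:
+#         return True, "Mixed int32/non-int32 inputs are not covered by this sweep"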
+def invalidate_vector(test_vector) -> Tuple[bool, Optional[str]]: + if test_vector["input_a_layout"] == ttnn.ROW_MAJOR_LAYOUT or test_vector["input_b_layout"] == ttnn.ROW_MAJOR_LAYOUT: + return True, "Row Major layout is not supported" + return False, None + + +# This is the run instructions for the test, defined by the developer. +# The run function must take the above-defined parameters as inputs. +# The runner will call this run function with each test vector, and the returned results from this function will be stored. +# If you defined a mesh_device_fixture above, the object you yielded will be passed into this function as 'device'. Otherwise, it will be the default ttnn device opened by the infra. +def run( + input_shape, + input_a_dtype, + input_b_dtype, + input_a_layout, + input_b_layout, + input_a_memory_config, + input_b_memory_config, + output_memory_config, + *, + device, +) -> list: + torch.manual_seed(0) + + torch_input_tensor_a = gen_func_with_cast_tt( + partial(torch_random, low=-100, high=100, dtype=torch.int32), input_a_dtype + )(input_shape) + torch_input_tensor_b = gen_func_with_cast_tt( + partial(torch_random, low=-100, high=100, dtype=torch.int32), input_b_dtype + )(input_shape) + + golden_function = ttnn.get_golden_function(ttnn.lt) + torch_output_tensor = golden_function(torch_input_tensor_a, torch_input_tensor_b) + + input_tensor_a = ttnn.from_torch( + torch_input_tensor_a, + dtype=input_a_dtype, + layout=input_a_layout, + device=device, + memory_config=input_a_memory_config, + ) + + input_tensor_b = ttnn.from_torch( + torch_input_tensor_b, + dtype=input_b_dtype, + layout=input_b_layout, + device=device, + memory_config=input_b_memory_config, + ) + + start_time = start_measuring_time() + result = ttnn.lt(input_tensor_a, input_tensor_b, memory_config=output_memory_config) + # ToDo: Update it once the tensor layout support with rank < 2 is supported in mid of Jan + output_tensor = ttnn.to_torch(result, torch_rank=len(input_shape)) + e2e_perf = stop_measuring_time(start_time) + + return [check_with_pcc(torch_output_tensor, output_tensor, 0.999), e2e_perf] diff --git a/tests/sweep_framework/sweeps/eltwise/binary/ne/ne_forge.py b/tests/sweep_framework/sweeps/eltwise/binary/ne/ne_forge.py new file mode 100644 index 00000000000..689938bf0b5 --- /dev/null +++ b/tests/sweep_framework/sweeps/eltwise/binary/ne/ne_forge.py @@ -0,0 +1,102 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent Inc. + +# SPDX-License-Identifier: Apache-2.0 + +from typing import Optional, Tuple +from functools import partial + +import torch +import ttnn +from tests.tt_eager.python_api_testing.sweep_tests.generation_funcs import gen_func_with_cast_tt + +from tests.ttnn.utils_for_testing import check_with_pcc, start_measuring_time, stop_measuring_time +from models.utility_functions import torch_random + + +# Parameters provided to the test vector generator are defined here. +# They are defined as dict-type suites that contain the arguments to the run function as keys, and lists of possible inputs as values. +# Each suite has a key name (in this case "suite_1" and "suite_2") which will associate the test vectors to this specific suite of inputs. +# Developers can create their own generator functions and pass them to the parameters as inputs. 
+ + +parameters = { + "nightly": { + "input_shape": [[1, 10]], + "input_a_dtype": [ttnn.int32], + "input_b_dtype": [ttnn.int32], + "input_a_layout": [ttnn.TILE_LAYOUT], + "input_b_layout": [ttnn.TILE_LAYOUT], + "input_a_memory_config": [ttnn.DRAM_MEMORY_CONFIG], + "input_b_memory_config": [ttnn.DRAM_MEMORY_CONFIG], + "output_memory_config": [ttnn.DRAM_MEMORY_CONFIG], + }, +} + + +def mesh_device_fixture(): + device = ttnn.open_device(device_id=0) + assert ttnn.device.is_wormhole_b0(device), "This op is available for Wormhole_B0 only" + yield (device, "Wormhole_B0") + ttnn.close_device(device) + del device + + +# Invalidate vector is called during the generation phase where each vector will be passed in. +# If invalidated, the vector will still be stored but will be skipped. +# Returns False, None if the vector is valid, and True, str with a reason for invalidation if it is invalid. +def invalidate_vector(test_vector) -> Tuple[bool, Optional[str]]: + if test_vector["input_a_layout"] == ttnn.ROW_MAJOR_LAYOUT or test_vector["input_b_layout"] == ttnn.ROW_MAJOR_LAYOUT: + return True, "Row Major layout is not supported" + return False, None + + +# This is the run instructions for the test, defined by the developer. +# The run function must take the above-defined parameters as inputs. +# The runner will call this run function with each test vector, and the returned results from this function will be stored. +# If you defined a mesh_device_fixture above, the object you yielded will be passed into this function as 'device'. Otherwise, it will be the default ttnn device opened by the infra. +def run( + input_shape, + input_a_dtype, + input_b_dtype, + input_a_layout, + input_b_layout, + input_a_memory_config, + input_b_memory_config, + output_memory_config, + *, + device, +) -> list: + torch.manual_seed(0) + + torch_input_tensor_a = gen_func_with_cast_tt( + partial(torch_random, low=-100, high=100, dtype=torch.float32), input_a_dtype + )(input_shape) + torch_input_tensor_b = gen_func_with_cast_tt( + partial(torch_random, low=-100, high=100, dtype=torch.float32), input_b_dtype + )(input_shape) + + golden_function = ttnn.get_golden_function(ttnn.ne) + torch_output_tensor = golden_function(torch_input_tensor_a, torch_input_tensor_b) + + input_tensor_a = ttnn.from_torch( + torch_input_tensor_a, + dtype=input_a_dtype, + layout=input_a_layout, + device=device, + memory_config=input_a_memory_config, + ) + + input_tensor_b = ttnn.from_torch( + torch_input_tensor_b, + dtype=input_b_dtype, + layout=input_b_layout, + device=device, + memory_config=input_b_memory_config, + ) + + start_time = start_measuring_time() + result = ttnn.ne(input_tensor_a, input_tensor_b, memory_config=output_memory_config) + output_tensor = ttnn.to_torch(result) + e2e_perf = stop_measuring_time(start_time) + + return [check_with_pcc(torch_output_tensor, output_tensor, 0.999), e2e_perf] diff --git a/tests/sweep_framework/sweeps/eltwise/binary/remainder/remainder_forge.py b/tests/sweep_framework/sweeps/eltwise/binary/remainder/remainder_forge.py new file mode 100644 index 00000000000..496ffc0abe2 --- /dev/null +++ b/tests/sweep_framework/sweeps/eltwise/binary/remainder/remainder_forge.py @@ -0,0 +1,95 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent Inc. 
+ +# SPDX-License-Identifier: Apache-2.0 + +from typing import Optional, Tuple +from functools import partial + +import torch +import ttnn +from tests.tt_eager.python_api_testing.sweep_tests.generation_funcs import gen_func_with_cast_tt + +from tests.ttnn.utils_for_testing import check_with_pcc, start_measuring_time, stop_measuring_time +from models.utility_functions import torch_random + + +# Parameters provided to the test vector generator are defined here. +# They are defined as dict-type suites that contain the arguments to the run function as keys, and lists of possible inputs as values. +# Each suite has a key name (in this case "suite_1" and "suite_2") which will associate the test vectors to this specific suite of inputs. +# Developers can create their own generator functions and pass them to the parameters as inputs. + + +parameters = { + "xfail": { + "input_shape": [[1]], + "input_a_dtype": [ttnn.int32], + "input_b_dtype": [ttnn.int32], + "input_a_layout": [ttnn.TILE_LAYOUT], + "input_b_layout": [ttnn.TILE_LAYOUT], + "input_a_memory_config": [ttnn.DRAM_MEMORY_CONFIG], + "input_b_memory_config": [ttnn.DRAM_MEMORY_CONFIG], + "output_memory_config": [ttnn.DRAM_MEMORY_CONFIG], + }, +} + + +# Invalidate vector is called during the generation phase where each vector will be passed in. +# If invalidated, the vector will still be stored but will be skipped. +# Returns False, None if the vector is valid, and True, str with a reason for invalidation if it is invalid. +def invalidate_vector(test_vector) -> Tuple[bool, Optional[str]]: + if test_vector["input_a_layout"] == ttnn.ROW_MAJOR_LAYOUT or test_vector["input_b_layout"] == ttnn.ROW_MAJOR_LAYOUT: + return True, "Row Major layout is not supported" + return False, None + + +# This is the run instructions for the test, defined by the developer. +# The run function must take the above-defined parameters as inputs. +# The runner will call this run function with each test vector, and the returned results from this function will be stored. +# If you defined a mesh_device_fixture above, the object you yielded will be passed into this function as 'device'. Otherwise, it will be the default ttnn device opened by the infra. 
+def run( + input_shape, + input_a_dtype, + input_b_dtype, + input_a_layout, + input_b_layout, + input_a_memory_config, + input_b_memory_config, + output_memory_config, + *, + device, +) -> list: + torch.manual_seed(0) + + torch_input_tensor_a = gen_func_with_cast_tt( + partial(torch_random, low=-100, high=100, dtype=torch.int32), input_a_dtype + )(input_shape) + torch_input_tensor_b = gen_func_with_cast_tt( + partial(torch_random, low=-100, high=100, dtype=torch.int32), input_b_dtype + )(input_shape) + + golden_function = ttnn.get_golden_function(ttnn.remainder) + torch_output_tensor = golden_function(torch_input_tensor_a, torch_input_tensor_b) + + input_tensor_a = ttnn.from_torch( + torch_input_tensor_a, + dtype=input_a_dtype, + layout=input_a_layout, + device=device, + memory_config=input_a_memory_config, + ) + + input_tensor_b = ttnn.from_torch( + torch_input_tensor_b, + dtype=input_b_dtype, + layout=input_b_layout, + device=device, + memory_config=input_b_memory_config, + ) + + start_time = start_measuring_time() + result = ttnn.remainder(input_tensor_a, input_tensor_b, memory_config=output_memory_config) + # ToDo: Update it once the tensor layout support with rank < 2 is supported in mid of Jan + output_tensor = ttnn.to_torch(result, torch_rank=len(input_shape)) + e2e_perf = stop_measuring_time(start_time) + + return [check_with_pcc(torch_output_tensor, output_tensor, 0.999), e2e_perf] diff --git a/tests/sweep_framework/sweeps/eltwise/ternary/where/where_forge.py b/tests/sweep_framework/sweeps/eltwise/ternary/where/where_forge.py new file mode 100644 index 00000000000..2fa7c381de2 --- /dev/null +++ b/tests/sweep_framework/sweeps/eltwise/ternary/where/where_forge.py @@ -0,0 +1,337 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent Inc. + +# SPDX-License-Identifier: Apache-2.0 + +from typing import Optional, Tuple +from functools import partial + +import torch +import ttnn +from tests.tt_eager.python_api_testing.sweep_tests.generation_funcs import gen_func_with_cast_tt, gen_bin + +from tests.ttnn.utils_for_testing import check_with_pcc, start_measuring_time, stop_measuring_time +from models.utility_functions import torch_random + + +# Parameters provided to the test vector generator are defined here. +# They are defined as dict-type suites that contain the arguments to the run function as keys, and lists of possible inputs as values. +# Each suite has a key name (in this case "suite_1" and "suite_2") which will associate the test vectors to this specific suite of inputs. +# Developers can create their own generator functions and pass them to the parameters as inputs. 
+ + +parameters = { + "nightly": { + "input_shape": [ + { + "tensor1": [19, 19], + "tensor2": [1], + "tensor3": [19, 19], + "input_dtype_a": "ttnn.bfloat16", + "input_dtype_b": "ttnn.float32", + "input_dtype_c": "ttnn.float32", + }, + { + "tensor1": [1, 1, 10, 10], + "tensor2": [1], + "tensor3": [1, 1, 10, 10], + "input_dtype_a": "ttnn.bfloat16", + "input_dtype_b": "ttnn.bfloat16", + "input_dtype_c": "ttnn.bfloat16", + }, + { + "tensor1": [1, 1, 12, 12], + "tensor2": [1], + "tensor3": [1, 1, 12, 12], + "input_dtype_a": "ttnn.bfloat16", + "input_dtype_b": "ttnn.bfloat16", + "input_dtype_c": "ttnn.bfloat16", + }, + { + "tensor1": [1, 1, 14, 14], + "tensor2": [1], + "tensor3": [1, 1, 14, 14], + "input_dtype_a": "ttnn.bfloat16", + "input_dtype_b": "ttnn.bfloat16", + "input_dtype_c": "ttnn.bfloat16", + }, + { + "tensor1": [1, 1, 16, 16], + "tensor2": [1], + "tensor3": [1, 1, 16, 16], + "input_dtype_a": "ttnn.bfloat16", + "input_dtype_b": "ttnn.bfloat16", + "input_dtype_c": "ttnn.bfloat16", + }, + { + "tensor1": [1, 1, 19, 19], + "tensor2": [1], + "tensor3": [1, 1, 19, 19], + "input_dtype_a": "ttnn.bfloat16", + "input_dtype_b": "ttnn.bfloat16", + "input_dtype_c": "ttnn.bfloat16", + }, + { + "tensor1": [1, 1, 256, 256], + "tensor2": [1], + "tensor3": [1, 1, 256, 256], + "input_dtype_a": "ttnn.bfloat16", + "input_dtype_b": "ttnn.bfloat16", + "input_dtype_c": "ttnn.bfloat16", + }, + { + "tensor1": [1, 1, 25, 25], + "tensor2": [1], + "tensor3": [1, 1, 25, 25], + "input_dtype_a": "ttnn.bfloat16", + "input_dtype_b": "ttnn.bfloat16", + "input_dtype_c": "ttnn.bfloat16", + }, + { + "tensor1": [1, 1, 32, 32], + "tensor2": [1], + "tensor3": [1, 1, 32, 32], + "input_dtype_a": "ttnn.bfloat16", + "input_dtype_b": "ttnn.bfloat16", + "input_dtype_c": "ttnn.bfloat16", + }, + { + "tensor1": [1, 1, 5, 5], + "tensor2": [1], + "tensor3": [1, 1, 5, 5], + "input_dtype_a": "ttnn.bfloat16", + "input_dtype_b": "ttnn.bfloat16", + "input_dtype_c": "ttnn.bfloat16", + }, + { + "tensor1": [1, 1, 6, 6], + "tensor2": [1], + "tensor3": [1, 1, 6, 6], + "input_dtype_a": "ttnn.bfloat16", + "input_dtype_b": "ttnn.bfloat16", + "input_dtype_c": "ttnn.bfloat16", + }, + { + "tensor1": [1, 1, 7, 7], + "tensor2": [1], + "tensor3": [1, 1, 7, 7], + "input_dtype_a": "ttnn.bfloat16", + "input_dtype_b": "ttnn.bfloat16", + "input_dtype_c": "ttnn.bfloat16", + }, + { + "tensor1": [1, 1, 9, 9], + "tensor2": [1], + "tensor3": [1, 1, 9, 9], + "input_dtype_a": "ttnn.bfloat16", + "input_dtype_b": "ttnn.bfloat16", + "input_dtype_c": "ttnn.bfloat16", + }, + { + "tensor1": [1, 920], + "tensor2": [1], + "tensor3": [1, 920], + "input_dtype_a": "ttnn.bfloat16", + "input_dtype_b": "ttnn.bfloat16", + "input_dtype_c": "ttnn.bfloat16", + }, + { + "tensor1": [2, 1, 7, 7], + "tensor2": [1], + "tensor3": [2, 1, 7, 7], + "input_dtype_a": "ttnn.bfloat16", + "input_dtype_b": "ttnn.bfloat16", + "input_dtype_c": "ttnn.bfloat16", + }, + { + "tensor1": [6, 6], + "tensor2": [1], + "tensor3": [6, 6], + "input_dtype_a": "ttnn.bfloat16", + "input_dtype_b": "ttnn.float32", + "input_dtype_c": "ttnn.float32", + }, + { + "tensor1": [7, 7], + "tensor2": [1], + "tensor3": [7, 7], + "input_dtype_a": "ttnn.bfloat16", + "input_dtype_b": "ttnn.float32", + "input_dtype_c": "ttnn.float32", + }, + # { + # "tensor1": [1, 45], + # "tensor2": [1], + # "tensor3": [1, 45], + # "input_dtype_a": "ttnn.bfloat16", + # "input_dtype_b": "ttnn.int32", + # "input_dtype_c": "ttnn.int32", + # }, + # { + # "tensor1": [1, 5], + # "tensor2": [1], + # "tensor3": [1, 5], + # "input_dtype_a": 
"ttnn.bfloat16", + # "input_dtype_b": "ttnn.int32", + # "input_dtype_c": "ttnn.int32", + # }, + # { + # "tensor1": [1, 6], + # "tensor2": [1], + # "tensor3": [1, 6], + # "input_dtype_a": "ttnn.bfloat16", + # "input_dtype_b": "ttnn.int32", + # "input_dtype_c": "ttnn.int32", + # }, + # { + # "tensor1": [1, 197], + # "tensor2": [196, 197], + # "tensor3": [196, 197], + # "input_dtype_a": "ttnn.bfloat16", + # "input_dtype_b": "ttnn.int32", + # "input_dtype_c": "ttnn.int32", + # }, + # { + # "tensor1": [1, 19], + # "tensor2": [1, 19], + # "tensor3": [1, 19], + # "input_dtype_a": "ttnn.bfloat16", + # "input_dtype_b": "ttnn.int32", + # "input_dtype_c": "ttnn.int32", + # }, + # { + # "tensor1": [1, 1], + # "tensor2": [1, 1], + # "tensor3": [1, 1], + # "input_dtype_a": "ttnn.bfloat16", + # "input_dtype_b": "ttnn.int32", + # "input_dtype_c": "ttnn.int32", + # }, + # { + # "tensor1": [15, 15], + # "tensor2": [15, 15], + # "tensor3": [15, 15], + # "input_dtype_a": "ttnn.bfloat16", + # "input_dtype_b": "ttnn.int32", + # "input_dtype_c": "ttnn.int32", + # }, + # { + # "tensor1": [197, 1], + # "tensor2": [197, 197], + # "tensor3": [197, 197], + # "input_dtype_a": "ttnn.bfloat16", + # "input_dtype_b": "ttnn.int32", + # "input_dtype_c": "ttnn.int32", + # }, + # { + # "tensor1": [19], + # "tensor2": [19], + # "tensor3": [1], + # "input_dtype_a": "ttnn.bfloat16", + # "input_dtype_b": "ttnn.int32", + # "input_dtype_c": "ttnn.int32", + # }, + ], + "input_a_layout": [ttnn.TILE_LAYOUT], + "input_b_layout": [ttnn.TILE_LAYOUT], + "input_c_layout": [ttnn.TILE_LAYOUT], + "input_a_memory_config": [ttnn.DRAM_MEMORY_CONFIG], + "input_b_memory_config": [ttnn.DRAM_MEMORY_CONFIG], + "input_c_memory_config": [ttnn.DRAM_MEMORY_CONFIG], + "output_memory_config": [ttnn.DRAM_MEMORY_CONFIG], + }, +} + + +# Invalidate vector is called during the generation phase where each vector will be passed in. +# If invalidated, the vector will still be stored but will be skipped. +# Returns False, None if the vector is valid, and True, str with a reason for invalidation if it is invalid. +def invalidate_vector(test_vector) -> Tuple[bool, Optional[str]]: + if ( + test_vector["input_a_layout"] == ttnn.ROW_MAJOR_LAYOUT + or test_vector["input_b_layout"] == ttnn.ROW_MAJOR_LAYOUT + or test_vector["input_c_layout"] == ttnn.ROW_MAJOR_LAYOUT + ): + return True, "Row Major layout is not supported" + return False, None + + +# This is the run instructions for the test, defined by the developer. +# The run function must take the above-defined parameters as inputs. +# The runner will call this run function with each test vector, and the returned results from this function will be stored. +# If you defined a mesh_device_fixture above, the object you yielded will be passed into this function as 'device'. Otherwise, it will be the default ttnn device opened by the infra. 
+def run(
+    input_shape,
+    input_a_layout,
+    input_b_layout,
+    input_c_layout,
+    input_a_memory_config,
+    input_b_memory_config,
+    input_c_memory_config,
+    output_memory_config,
+    *,
+    device,
+) -> list:
+    torch.manual_seed(0)
+
+    if input_shape["input_dtype_a"] == "ttnn.bfloat16":
+        input_dtype_a = ttnn.bfloat16
+    elif input_shape["input_dtype_a"] == "ttnn.float32":
+        input_dtype_a = ttnn.float32
+    elif input_shape["input_dtype_a"] == "ttnn.int32":
+        input_dtype_a = ttnn.int32
+
+    if input_shape["input_dtype_b"] == "ttnn.bfloat16":
+        input_dtype_b = ttnn.bfloat16
+    elif input_shape["input_dtype_b"] == "ttnn.float32":
+        input_dtype_b = ttnn.float32
+    elif input_shape["input_dtype_b"] == "ttnn.int32":
+        input_dtype_b = ttnn.int32
+
+    if input_shape["input_dtype_c"] == "ttnn.bfloat16":
+        input_dtype_c = ttnn.bfloat16
+    elif input_shape["input_dtype_c"] == "ttnn.float32":
+        input_dtype_c = ttnn.float32
+    elif input_shape["input_dtype_c"] == "ttnn.int32":
+        input_dtype_c = ttnn.int32
+
+    # tensor1 is generated as 0/1 values via gen_bin; the "> 0" below turns it
+    # into the boolean condition for torch.where.
+    torch_input_tensor_a = gen_func_with_cast_tt(gen_bin, input_dtype_a)(input_shape["tensor1"])
+    torch_input_tensor_b = gen_func_with_cast_tt(
+        partial(torch_random, low=-100, high=100, dtype=torch.float32), input_dtype_b
+    )(input_shape["tensor2"])
+    torch_input_tensor_c = gen_func_with_cast_tt(
+        partial(torch_random, low=-100, high=100, dtype=torch.float32), input_dtype_c
+    )(input_shape["tensor3"])
+
+    torch_output_tensor = torch.where(torch_input_tensor_a > 0, torch_input_tensor_b, torch_input_tensor_c)
+
+    input_tensor_a = ttnn.from_torch(
+        torch_input_tensor_a,
+        dtype=input_dtype_a,
+        layout=input_a_layout,
+        device=device,
+        memory_config=input_a_memory_config,
+    )
+
+    input_tensor_b = ttnn.from_torch(
+        torch_input_tensor_b,
+        dtype=input_dtype_b,
+        layout=input_b_layout,
+        device=device,
+        memory_config=input_b_memory_config,
+    )
+
+    input_tensor_c = ttnn.from_torch(
+        torch_input_tensor_c,
+        dtype=input_dtype_c,
+        layout=input_c_layout,
+        device=device,
+        memory_config=input_c_memory_config,
+    )
+
+    start_time = start_measuring_time()
+    result = ttnn.where(input_tensor_a, input_tensor_b, input_tensor_c, memory_config=output_memory_config)
+    # ToDo: Update it once the tensor layout support with rank < 2 is supported in mid of Jan
+    output_tensor = ttnn.to_torch(result, torch_rank=len(input_shape["tensor1"]))
+    e2e_perf = stop_measuring_time(start_time)
+
+    return [check_with_pcc(torch_output_tensor, output_tensor, 0.999), e2e_perf]
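Note on the repeated dtype dispatch: div_forge, eq_forge, and where_forge each map the dtype strings carried in their input_shape vectors onto ttnn dtypes with an if/elif chain, and the chain leaves the variable unbound if an unknown string ever slips into a vector. Below is a minimal sketch of a shared lookup the sweeps could import instead; the helper name to_ttnn_dtype and its module placement are assumptions, while the three ttnn dtype attributes are exactly the ones the sweeps above already use.

    import ttnn

    # Maps the dtype strings used in the *_forge test vectors to ttnn dtypes.
    _TTNN_DTYPES = {
        "ttnn.bfloat16": ttnn.bfloat16,
        "ttnn.float32": ttnn.float32,
        "ttnn.int32": ttnn.int32,
    }


    def to_ttnn_dtype(name: str):
        # A KeyError on an unknown string fails at the dispatch site, rather
        # than as a NameError later in run() as the current if/elif chains would.
        return _TTNN_DTYPES[name]

With such a helper, each run() body reduces the chain to a single line, e.g. input_dtype = to_ttnn_dtype(input_shape["input_dtype"]).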