Migrate sweeps of ops in test_composite.py from tt_eager to ttnn (#10829)

* #10147: Migrated ops lerp_binary to xlogy from tt-dnn/test_composite.py

* #10147: Migrated ops asinh to celu from tt-dnn/test_composite.py

* #10147: Modified test_eltwise_unary to pass for silu op

* #10147: Fixed range of logit op in test_composite.py

* #10147: Conflict fixes

* #10147: Reformatting

* #10147: Reformatting 2

* #10147: Fix test_addalpha
amalbasaTT authored Jul 31, 2024
1 parent bf90b82 commit 5feb7bd
Showing 195 changed files with 384 additions and 5,006 deletions.
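
The commits above replace tt_lib-backed entries in the sweep op map with ttnn-backed equivalents. As a minimal, runnable sketch of the op-map convention the harness consumes (stand-in lambdas replace the real wrapper modules, and `run_entry` is a hypothetical stand-in for `run_single_pytorch_test`):

```python
import torch

# Sketch of the sweep harness's op-map convention, with stand-in callables so
# it runs without tt-metal. In the real op_map.py, "tt_op" points at a device
# wrapper (tt_lib_ops before this commit, a ttnn-backed wrapper after the
# migration) and "pytorch_op" at the PyTorch reference implementation.
op_map = {
    "eltwise-lerp_binary": {
        "tt_op": lambda a, b, w: a + w * (b - a),  # stand-in for the device op
        "pytorch_op": torch.lerp,                  # PyTorch reference
    },
}


def run_entry(name, *args):
    # Hypothetical runner: the real harness additionally sweeps shapes,
    # dtypes, layouts, and memory configs, and compares with a PCC metric.
    entry = op_map[name]
    assert torch.allclose(entry["tt_op"](*args), entry["pytorch_op"](*args))


run_entry("eltwise-lerp_binary", torch.zeros(2, 2), torch.ones(2, 2), 0.25)
```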
176 changes: 0 additions & 176 deletions tests/tt_eager/python_api_testing/sweep_tests/op_map.py
@@ -38,10 +38,6 @@
"tt_op": tt_lib_ops.move,
"pytorch_op": pytorch_ops.move,
},
"arange": {
"tt_op": tt_lib_ops.arange,
"pytorch_op": pytorch_ops.arange,
},
"prod": {
"tt_op": tt_lib_ops.prod,
"pytorch_op": pytorch_ops.prod,
@@ -96,18 +92,6 @@
"tt_op": tt_lib_ops.triu,
"pytorch_op": pytorch_ops.triu,
},
"eltwise-zeros": {
"tt_op": tt_lib_ops.zeros,
"pytorch_op": pytorch_ops.zeros,
},
"eltwise-empty": {
"tt_op": tt_lib_ops.empty,
"pytorch_op": pytorch_ops.empty,
},
"eltwise-ones": {
"tt_op": tt_lib_ops.ones,
"pytorch_op": pytorch_ops.ones,
},
"fill-rm": {
"tt_op": tt_lib_ops.fill_rm,
"pytorch_op": pytorch_ops.fill_rm,
@@ -124,22 +108,6 @@
"tt_op": tt_lib_ops.fill_zero_bw,
"pytorch_op": pytorch_ops.fill_zero_bw,
},
"eltwise-full": {
"tt_op": tt_lib_ops.full,
"pytorch_op": pytorch_ops.full,
},
"eltwise-zeros_like": {
"tt_op": tt_lib_ops.zeros_like,
"pytorch_op": pytorch_ops.zeros_like,
},
"eltwise-ones_like": {
"tt_op": tt_lib_ops.ones_like,
"pytorch_op": pytorch_ops.ones_like,
},
"eltwise-full_like": {
"tt_op": tt_lib_ops.full_like,
"pytorch_op": pytorch_ops.full_like,
},
"eltwise-div_unary": {
"tt_op": tt_lib_ops.eltwise_div_unary,
"pytorch_op": pytorch_ops.div_unary,
@@ -172,10 +140,6 @@
"tt_op": tt_lib_ops.eltwise_i0,
"pytorch_op": pytorch_ops.i0,
},
"eltwise-lgamma": {
"tt_op": tt_lib_ops.eltwise_lgamma,
"pytorch_op": pytorch_ops.lgamma,
},
"eltwise-logical_noti": {
"tt_op": tt_lib_ops.eltwise_logical_noti,
"pytorch_op": pytorch_ops.logical_noti,
@@ -184,18 +148,6 @@
"tt_op": None, # tt_lib_ops.eltwise_bitwise_complement,
"pytorch_op": None, # pytorch_ops.bitwise_complement,
},
"eltwise-logical_xor": {
"tt_op": tt_lib_ops.eltwise_logical_xor,
"pytorch_op": pytorch_ops.logical_xor,
},
"eltwise-sinh": {
"tt_op": tt_lib_ops.eltwise_sinh,
"pytorch_op": pytorch_ops.sinh,
},
"eltwise-cosh": {
"tt_op": tt_lib_ops.eltwise_cosh,
"pytorch_op": pytorch_ops.cosh,
},
"eltwise-ltz": {
"tt_op": tt_lib_ops.eltwise_ltz,
"pytorch_op": pytorch_ops.ltz,
@@ -224,10 +176,6 @@
"tt_op": tt_lib_ops.eltwise_abs,
"pytorch_op": pytorch_ops.abs,
},
"eltwise-digamma": {
"tt_op": tt_lib_ops.eltwise_digamma,
"pytorch_op": pytorch_ops.digamma,
},
"eltwise-isfinite": {
"tt_op": tt_lib_ops.eltwise_isfinite,
"pytorch_op": pytorch_ops.isfinite,
@@ -252,14 +200,6 @@
"tt_op": tt_lib_ops.eltwise_sign,
"pytorch_op": pytorch_ops.sign,
},
"eltwise-multigammaln": {
"tt_op": tt_lib_ops.eltwise_multigammaln,
"pytorch_op": pytorch_ops.multigammaln,
},
"eltwise-silu": {
"tt_op": tt_lib_ops.eltwise_silu,
"pytorch_op": pytorch_ops.silu,
},
"eltwise-elu": {
"tt_op": tt_lib_ops.eltwise_elu,
"pytorch_op": pytorch_ops.elu,
@@ -292,26 +232,14 @@
"tt_op": tt_lib_ops.eltwise_square,
"pytorch_op": pytorch_ops.square,
},
"eltwise-mish": {
"tt_op": tt_lib_ops.eltwise_mish,
"pytorch_op": pytorch_ops.mish,
},
"eltwise-softplus": {
"tt_op": tt_lib_ops.eltwise_softplus,
"pytorch_op": pytorch_ops.softplus,
},
"eltwise-log1p": {
"tt_op": tt_lib_ops.eltwise_log1p,
"pytorch_op": pytorch_ops.log1p,
},
"eltwise-neg": {
"tt_op": tt_lib_ops.eltwise_neg,
"pytorch_op": pytorch_ops.neg,
},
"eltwise-swish": {
"tt_op": tt_lib_ops.eltwise_swish,
"pytorch_op": pytorch_ops.swish,
},
"eltwise-cos": {
"tt_op": tt_lib_ops.eltwise_cos,
"pytorch_op": pytorch_ops.cos,
@@ -336,22 +264,10 @@
"tt_op": tt_lib_ops.eltwise_atan,
"pytorch_op": pytorch_ops.atan,
},
"eltwise-atanh": {
"tt_op": tt_lib_ops.eltwise_atanh,
"pytorch_op": pytorch_ops.atanh,
},
"eltwise-acos": {
"tt_op": tt_lib_ops.eltwise_acos,
"pytorch_op": pytorch_ops.acos,
},
"eltwise-asinh": {
"tt_op": tt_lib_ops.eltwise_asinh,
"pytorch_op": pytorch_ops.asinh,
},
"eltwise-acosh": {
"tt_op": tt_lib_ops.eltwise_acosh,
"pytorch_op": pytorch_ops.acosh,
},
"eltwise-exp": {
"tt_op": tt_lib_ops.eltwise_exp,
"pytorch_op": pytorch_ops.exp,
@@ -384,10 +300,6 @@
"tt_op": tt_lib_ops.eltwise_rsqrt,
"pytorch_op": pytorch_ops.rsqrt,
},
"eltwise-xlogy": {
"tt_op": tt_lib_ops.eltwise_xlogy,
"pytorch_op": pytorch_ops.xlogy,
},
"eltwise-logical_and": {
"tt_op": tt_lib_ops.eltwise_logical_and,
"pytorch_op": pytorch_ops.logical_and,
@@ -396,18 +308,6 @@
"tt_op": tt_lib_ops.eltwise_logical_andi,
"pytorch_op": pytorch_ops.logical_andi,
},
"eltwise-atan2": {
"tt_op": tt_lib_ops.eltwise_atan2,
"pytorch_op": pytorch_ops.atan2,
},
"eltwise-lerp_binary": {
"tt_op": tt_lib_ops.eltwise_lerp_binary,
"pytorch_op": pytorch_ops.lerp_binary,
},
"eltwise-lerp_ternary": {
"tt_op": tt_lib_ops.eltwise_lerp_ternary,
"pytorch_op": pytorch_ops.lerp_ternary,
},
"eltwise-leaky_relu": {
"tt_op": tt_lib_ops.eltwise_leaky_relu,
"pytorch_op": pytorch_ops.leaky_relu,
@@ -416,18 +316,10 @@
"tt_op": tt_lib_ops.eltwise_prelu,
"pytorch_op": pytorch_ops.prelu,
},
"eltwise-hardshrink": {
"tt_op": tt_lib_ops.eltwise_hardshrink,
"pytorch_op": pytorch_ops.hardshrink,
},
"eltwise-bias_gelu_unary": {
"tt_op": tt_lib_ops.eltwise_bias_gelu_unary,
"pytorch_op": pytorch_ops.bias_gelu_unary,
},
"eltwise-softshrink": {
"tt_op": tt_lib_ops.eltwise_softshrink,
"pytorch_op": pytorch_ops.softshrink,
},
"eltwise-softsign": {
"tt_op": tt_lib_ops.eltwise_softsign,
"pytorch_op": pytorch_ops.softsign,
@@ -448,26 +340,10 @@
"tt_op": tt_lib_ops.bert_large_fused_qkv_matmul,
"pytorch_op": pytorch_ops.bert_large_fused_qkv_matmul,
},
"eltwise-polyval": {
"tt_op": tt_lib_ops.eltwise_polyval,
"pytorch_op": pytorch_ops.polyval,
},
"eltwise-mac": {
"tt_op": tt_lib_ops.eltwise_mac,
"pytorch_op": pytorch_ops.mac,
},
"eltwise-addcmul": {
"tt_op": tt_lib_ops.eltwise_addcmul,
"pytorch_op": pytorch_ops.addcmul,
},
"eltwise-celu": {
"tt_op": tt_lib_ops.eltwise_celu,
"pytorch_op": pytorch_ops.celu,
},
"eltwise-addcdiv": {
"tt_op": tt_lib_ops.eltwise_addcdiv,
"pytorch_op": pytorch_ops.addcdiv,
},
"eltwise-sigmoid": {
"tt_op": tt_lib_ops.eltwise_sigmoid,
"pytorch_op": pytorch_ops.sigmoid,
@@ -536,46 +412,18 @@
"tt_op": tt_lib_ops.eltwise_erfinv,
"pytorch_op": pytorch_ops.erfinv,
},
"eltwise-nextafter": {
"tt_op": tt_lib_ops.eltwise_nextafter,
"pytorch_op": pytorch_ops.nextafter,
},
"eltwise-subalpha": {
"tt_op": tt_lib_ops.eltwise_subalpha,
"pytorch_op": pytorch_ops.subalpha,
},
"eltwise-addalpha": {
"tt_op": tt_lib_ops.eltwise_addalpha,
"pytorch_op": pytorch_ops.addalpha,
},
"eltwise-addalpha-optional": {
"tt_op": tt_lib_ops.eltwise_addalpha_optional,
"pytorch_op": pytorch_ops.addalpha,
},
"lamb-optimizer": {
"tt_op": tt_lib_ops.lamb_optimizer,
"pytorch_op": pytorch_ops.lamb_optimizer,
},
"eltwise-logit": {
"tt_op": tt_lib_ops.eltwise_logit,
"pytorch_op": pytorch_ops.logit,
},
"eltwise-polygamma": {
"tt_op": tt_lib_ops.eltwise_polygamma,
"pytorch_op": pytorch_ops.polygamma,
},
"eltwise-logical_xori": {
"tt_op": tt_lib_ops.eltwise_logical_xori,
"pytorch_op": pytorch_ops.logical_xori,
},
"eltwise-hardsigmoid": {
"tt_op": tt_lib_ops.eltwise_hardsigmoid,
"pytorch_op": pytorch_ops.hardsigmoid,
},
"eltwise-hardswish": {
"tt_op": tt_lib_ops.eltwise_hardswish,
"pytorch_op": pytorch_ops.hardswish,
},
"eltwise-log": {
"tt_op": tt_lib_ops.eltwise_log,
"pytorch_op": pytorch_ops.log,
Expand All @@ -596,10 +444,6 @@
"tt_op": tt_lib_ops.eltwise_tanh_bw,
"pytorch_op": pytorch_ops.tanh_bw,
},
"eltwise-tanhshrink": {
"tt_op": tt_lib_ops.eltwise_tanhshrink,
"pytorch_op": pytorch_ops.tanhshrink,
},
"eltwise-signbit": {
"tt_op": tt_lib_ops.eltwise_signbit,
"pytorch_op": pytorch_ops.signbit,
Expand Down Expand Up @@ -733,22 +577,10 @@
"tt_op": tt_lib_ops.eltwise_rad2deg,
"pytorch_op": pytorch_ops.rad2deg,
},
"eltwise-cbrt": {
"tt_op": tt_lib_ops.eltwise_cbrt,
"pytorch_op": pytorch_ops.cbrt,
},
"eltwise-hypot": {
"tt_op": tt_lib_ops.eltwise_hypot,
"pytorch_op": pytorch_ops.hypot,
},
"eltwise-scatter": {
"tt_op": tt_lib_ops.eltwise_scatter,
"pytorch_op": pytorch_ops.scatter,
},
"eltwise-threshold": {
"tt_op": tt_lib_ops.eltwise_threshold,
"pytorch_op": pytorch_ops.threshold,
},
"eltwise-relu6": {
"tt_op": tt_lib_ops.eltwise_relu6,
"pytorch_op": pytorch_ops.relu6,
Expand Down Expand Up @@ -781,10 +613,6 @@
"tt_op": tt_lib_ops.eltwise_logical_ori,
"pytorch_op": pytorch_ops.logical_ori,
},
"eltwise-isclose": {
"tt_op": tt_lib_ops.eltwise_isclose,
"pytorch_op": pytorch_ops.isclose,
},
# Eltwise binary with optional output
"eltwise-ne-optional": {
"tt_op": tt_lib_ops.eltwise_ne_optional,
Expand Down Expand Up @@ -851,10 +679,6 @@
"pytorch_op": pytorch_ops.logical_and,
},
# Eltwise ternary
"eltwise-arange": {
"tt_op": tt_lib_ops.arange,
"pytorch_op": pytorch_ops.arange,
},
"eltwise-where": {
"tt_op": tt_lib_ops.where,
"pytorch_op": pytorch_ops.where,
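Every entry removed from op_map.py above now has a ttnn-side counterpart in the ttnn sweep suite. For the deleted creation-op entries (eltwise-zeros, eltwise-ones, eltwise-full, eltwise-arange), a minimal sketch of the ttnn side, assuming the current ttnn creation-op keywords (`ttnn.zeros`/`ttnn.ones`; exact signatures may vary by version):

```python
import torch
import ttnn

# Hedged sketch: exercise the ttnn counterparts of the removed creation-op
# sweep entries and check them against the PyTorch reference, as the harness
# does. Assumes ttnn.zeros/ttnn.ones accept shape/dtype/layout/device keywords
# in this form; adjust to the installed ttnn version if needed.
device = ttnn.open_device(device_id=0)

shape = (1, 1, 32, 32)  # tile-aligned, matching the single-core sweep shapes
tt_zeros = ttnn.zeros(shape, dtype=ttnn.bfloat16, layout=ttnn.TILE_LAYOUT, device=device)
tt_ones = ttnn.ones(shape, dtype=ttnn.bfloat16, layout=ttnn.TILE_LAYOUT, device=device)

assert torch.equal(ttnn.to_torch(tt_zeros), torch.zeros(shape, dtype=torch.bfloat16))
assert torch.equal(ttnn.to_torch(tt_ones), torch.ones(shape, dtype=torch.bfloat16))

ttnn.close_device(device)
```

The next diff removes the addalpha optional-output sweep from the tt_eager suite.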
@@ -72,54 +72,5 @@ def test_run_addalpha(
comparison_func,
device,
test_args,
)


shapes_w_output = [
[[1, 1, 32, 32], [1, 1, 32, 32], [1, 1, 32, 32]], # Single core
[[1, 1, 32, 32], [32, 1, 32, 32], [32, 1, 32, 32]], # Single core
[[64, 1, 32, 32], [1, 1, 32, 32], [64, 1, 32, 32]], # Single core
[[1, 1, 320, 384], [1, 1, 320, 384], [1, 1, 320, 384]], # Multi core
[[1, 3, 320, 384], [1, 3, 320, 384], [1, 3, 320, 384]], # Multi core
]


@pytest.mark.skip(reason="Issue: #10735, will be enabled once optional tensor support is added for ttnn.addalpha")
@pytest.mark.parametrize(
"input_shapes",
shapes_w_output,
)
@pytest.mark.parametrize("input_mem_config", input_mem_cfgs)
@pytest.mark.parametrize("fn_kind", ["addalpha"])
@pytest.mark.parametrize("pass_qid", [True, False])
def test_run_addalpha_optional_output(
input_shapes,
fn_kind,
input_mem_config,
device,
pass_qid,
function_level_defaults,
):
datagen_func = [
generation_funcs.gen_func_with_cast(partial(generation_funcs.gen_rand, low=-100, high=100), torch.float32)
] * len(input_shapes)
datagen_func.append(
generation_funcs.gen_func_with_cast(partial(generation_funcs.gen_rand, low=-80, high=80), torch.bfloat16)
)
test_args = list(generation_funcs.gen_default_dtype_layout_device(input_shapes))[0]
test_args.update(
{
"input_mem_config": [input_mem_config, input_mem_config, input_mem_config],
"alpha": np.random.randint(1, 100),
"queue_id": pass_qid,
}
)
comparison_func = comparison_funcs.comp_pcc
run_single_pytorch_test(
f"eltwise-{fn_kind}-optional",
input_shapes,
datagen_func,
comparison_func,
device,
test_args,
ttnn_op=True,
)
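
With the tt_eager version deleted, the migrated sweep drives ttnn.addalpha directly against the torch reference. A standalone sketch of the property being checked (not the harness wiring — memory configs, data generation, and the comp_pcc comparison are omitted; the tolerance below is an assumption):

```python
import torch
import ttnn


def check_addalpha(device, shape=(1, 1, 32, 32), alpha=3.0):
    # Property the sweep verifies: ttnn.addalpha(a, b, alpha) computes
    # a + alpha * b, matching torch.add(a, b, alpha=alpha) up to bfloat16
    # precision on device.
    a = torch.rand(shape, dtype=torch.bfloat16) * 200 - 100
    b = torch.rand(shape, dtype=torch.bfloat16) * 200 - 100

    tt_a = ttnn.from_torch(a, layout=ttnn.TILE_LAYOUT, device=device)
    tt_b = ttnn.from_torch(b, layout=ttnn.TILE_LAYOUT, device=device)
    tt_out = ttnn.to_torch(ttnn.addalpha(tt_a, tt_b, alpha))

    ref = torch.add(a, b, alpha=alpha)
    # The real harness compares with comp_pcc; a loose allclose stands in
    # here (assumed tolerance, not taken from the source).
    assert torch.allclose(tt_out.float(), ref.float(), rtol=1e-2, atol=1e-1)
```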