Skip to content

Commit

Permalink
#8865: Switch ported ops from tt_lib to ttnn for host dispatch time measuring
Browse files Browse the repository at this point in the history
  • Loading branch information
nemanjagrujic committed Jul 9, 2024
1 parent e9e3ad3 commit f5c2ff5
Show file tree
Hide file tree
Showing 3 changed files with 13 additions and 14 deletions.
12 changes: 6 additions & 6 deletions tests/tt_eager/profiling/ops_for_profiling.py
Original file line number Diff line number Diff line change
Expand Up @@ -2485,15 +2485,15 @@ def addalpha_bw(x, y, z):


def addcmul_bw(x, y, z):
tt_lib.tensor.addcmul_bw(x, x, y, z, value=5)
ttnn.addcmul_bw(x, x, y, z, value=5)


def addcdiv_bw(x, y, z):
tt_lib.tensor.addcdiv_bw(x, x, y, z, value=5)
ttnn.addcdiv_bw(x, x, y, z, alpha=5)


def where_bw(x, y, z):
tt_lib.tensor.where_bw(x, y, z, z)
ttnn.where_bw(x, y, z, z)


def bias_gelu_bw_none(x, y, z):
Expand Down Expand Up @@ -2644,11 +2644,11 @@ def fused_linear_shape_func(input_shape):
},
{
"op": addcmul_bw,
"name": "tt_lib.tensor.addcmul_bw",
"name": "ttnn.addcmul_bw",
},
{
"op": addcdiv_bw,
"name": "tt_lib.tensor.addcdiv_bw",
"name": "ttnn.addcdiv_bw",
},
{
"op": ttnn.binary_assign_bw,
Expand Down Expand Up @@ -2701,7 +2701,7 @@ def fused_linear_shape_func(input_shape):
# },
{
"op": where_bw,
"name": "tt_lib.tensor.where_bw",
"name": "ttnn.where_bw",
},
{
"op": ttnn.sub_bw,
Expand Down
6 changes: 3 additions & 3 deletions tests/tt_eager/profiling/profile_host_overhead.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
import statistics
from loguru import logger

# from models.utility_functions import torch2tt_tensor
from models.utility_functions import torch2tt_tensor
from tests.tt_eager.profiling import ops_for_profiling
from tracy import signpost

Expand All @@ -36,8 +36,8 @@
NUM_REPEATS = 15


def torch2tt_tensor(x, device, dlayout, in_mem_config, dtype):
return tt_lib.tensor.Tensor(x, dtype).pad_to_tile(float("nan")).to(dlayout).to(device, in_mem_config)
# def torch2tt_tensor(x, device, dlayout, in_mem_config, dtype):
# return tt_lib.tensor.Tensor(x, dtype).pad_to_tile(float("nan")).to(dlayout).to(device, in_mem_config)


def measure_host_overhead(op_func, op_name, device, num_call_to_stack, is_warmup):
Expand Down
9 changes: 4 additions & 5 deletions tests/tt_eager/profiling/reference.txt
Original file line number Diff line number Diff line change
Expand Up @@ -51,9 +51,9 @@ tt_lib.tensor.add_unary,40,0.89,1.0,1.3,1.03
tt_lib.tensor.addalpha,40,1.71,1.9,2.36,1.94
ttnn.addalpha_bw,40,1.01,1.42,1.69,1.46
tt_lib.tensor.addcdiv,40,17.67,19.69,20.7,19.42
tt_lib.tensor.addcdiv_bw,40,31.79,34.53,34.99,33.88
ttnn.addcdiv_bw,40,31.79,34.53,34.99,33.88
tt_lib.tensor.addcmul,40,2.5,2.66,2.99,2.65
tt_lib.tensor.addcmul_bw,40,3.68,4.31,4.62,4.13
ttnn.addcmul_bw,40,3.68,4.31,4.62,4.13
tt_lib.tensor.angle_bw,40,0.95,1.04,1.29,1.05
tt_lib.tensor.arange,40,0.02,0.03,0.12,0.0
tt_lib.tensor.argmax_all,40,113.57,118.43,119.39,39.46
Expand Down Expand Up @@ -98,7 +98,6 @@ ttnn.binary_gt_bw,40,1.78,2.16,2.43,2.24
ttnn.binary_le_bw,40,1.98,2.06,2.44,4.37
ttnn.binary_lt_bw,40,1.9,2.27,2.56,2.18
ttnn.binary_ne_bw,40,1.88,2.17,2.57,2.11
tt_lib.tensor.bmm,40,0.9,0.96,3.05,1.0
tt_lib.tensor.cbrt,40,4.93,5.56,5.86,5.32
tt_lib.tensor.ceil_bw,40,0.96,1.06,1.35,1.09
tt_lib.tensor.celu_bw,40,10.62,11.46,12.0,11.09
Expand Down Expand Up @@ -249,7 +248,7 @@ ttnn.lt,40,0.9,1.19,1.24,1.12
tt_lib.tensor.ltz,40,0.81,0.9,1.12,0.93
tt_lib.tensor.mac,40,1.69,1.79,2.13,1.83
tt_lib.tensor.maeloss,40,11.89,13.2,14.13,12.75
tt_lib.tensor.matmul,40,0.88,1.78,3.89,1.5
ttnn.matmul,40,0.88,1.78,3.89,1.5
ttnn.max_bw,40,9.93,10.57,11.04,10.39
tt_lib.tensor.mean_hw,40,1.17,1.44,1.88,1.62
ttnn.min_bw,40,9.15,10.14,10.67,10.13
Expand Down Expand Up @@ -388,7 +387,7 @@ tt_lib.tensor.var_hw,40,3.69,4.05,4.4,4.17
tt_lib.tensor.where,40,3.57,4.16,4.47,4.19
tt_lib.tensor.where_binary_x_const_y,40,4.46,5.09,5.27,4.84
tt_lib.tensor.where_binary_x_y_const,40,4.1,4.33,4.65,4.25
tt_lib.tensor.where_bw,40,8.32,9.01,10.22,9.05
ttnn.where_bw,40,8.32,9.01,10.22,9.05
tt_lib.tensor.where_unary_x_const_const,40,3.96,4.18,4.58,4.17
tt_lib.tensor.xlogy,40,10.34,10.84,11.43,10.71
ttnn.xlogy_bw,40,34.64,38.33,40.12,38.53
Expand Down

0 comments on commit f5c2ff5

Please sign in to comment.