Commit 064cd53: wh perf

Pavle Josipovic committed Dec 2, 2024
1 parent db8c8d4
Showing 2 changed files with 8 additions and 6 deletions.
4 changes: 2 additions & 2 deletions in models/demos/vgg/tests/test_perf_vgg.py

@@ -137,10 +137,10 @@ def test_perf_device_bare_metal_vgg(batch_size, model_name):
     margin = 0.03

     if model_name == "ttnn_vgg11":
-        expected_perf = 168 if is_grayskull() else 283.289
+        expected_perf = 168 if is_grayskull() else 356
         command = f"pytest tests/ttnn/integration_tests/vgg/test_ttnn_vgg11.py"
     else:
-        expected_perf = 144 if is_grayskull() else 201.3867
+        expected_perf = 144 if is_grayskull() else 276
         command = f"pytest tests/ttnn/integration_tests/vgg/test_ttnn_vgg16.py"

     cols = ["DEVICE FW", "DEVICE KERNEL", "DEVICE BRISC KERNEL"]
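The non-Grayskull values are the Wormhole targets (the "wh" in the commit title); both were raised to match the throughput gained from the conv config change in ttnn_vgg.py below. The assertion that consumes them sits outside this hunk, but margin = 0.03 feeds a bracketing check of roughly this shape; a sketch only, with a hypothetical measured_perf:

    # Illustrative sketch, not the repo's actual perf-report helper.
    # A 3% margin brackets the expected throughput on either side.
    lower_bound = expected_perf * (1 - margin)
    upper_bound = expected_perf * (1 + margin)
    assert lower_bound <= measured_perf <= upper_bound, (
        f"measured {measured_perf:.2f} outside [{lower_bound:.2f}, {upper_bound:.2f}]"
    )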
10 changes: 6 additions & 4 deletions in models/demos/vgg/tt/ttnn_vgg.py
@@ -50,7 +50,7 @@
 ]
 conv_feature_ids = [0, 2, 5, 7, 10, 12, 14, 17, 19, 21, 24, 26, 28]
 classifier_ids = [0, 3, 6]
-h_override = [128, 128, 128, 64, 32, 32, 32, 32, 32, 32, 32, 32, 32]
+h_override = [None, None, None, None, None, 256, 256, None, None, None, None, None, None]


 def ttnn_vgg16(
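The old table forced a small activation block height (128/64/32) onto all 13 VGG-16 convs; the new one pins only two layers and lets every other conv use the op's own choice. A quick sanity check of the new table's intent:

    # The 13 entries map one-to-one onto the convs at conv_feature_ids above.
    h_override = [None, None, None, None, None, 256, 256, None, None, None, None, None, None]
    pinned = {i: h for i, h in enumerate(h_override) if h is not None}
    assert pinned == {5: 256, 6: 256}  # only the 6th and 7th convs stay pinned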
@@ -98,14 +98,15 @@ def ttnn_vgg16(
             deallocate_activation=False,
             input_channels_alignment=32,
             reallocate_halo_output=False,
-            act_block_h_override=h_override[iter_conv_id],
             transpose_shards=True,
             shard_layout=(
                 ttnn.TensorMemoryLayout.HEIGHT_SHARDED if h_sharding else ttnn.TensorMemoryLayout.BLOCK_SHARDED
             ),
             reshard_if_not_optimal=True,
             enable_weights_double_buffer=True,
         )
+        if h_override[iter_conv_id] is not None:
+            conv_config.act_block_h_override = h_override[iter_conv_id]

         tt_weight = parameters.features[conv_feature_ids[iter_conv_id]].weight
         tt_weight = ttnn.to_layout(ttnn.from_device(tt_weight), layout=ttnn.ROW_MAJOR_LAYOUT)
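Moving the override out of the Conv2dConfig constructor means a None entry leaves act_block_h_override at its default, which my understanding is 0 in ttnn, treated as "no override": the conv op picks the activation block height itself. The same guard, as a standalone sketch with assumed names:

    # Sketch of the guard used above (assumes ttnn.Conv2dConfig defaults
    # act_block_h_override to 0, which the conv op treats as auto-select).
    def maybe_pin_act_block_h(conv_config, overrides, layer_idx):
        if overrides[layer_idx] is not None:
            conv_config.act_block_h_override = overrides[layer_idx]
        return conv_config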
@@ -173,7 +174,7 @@ def ttnn_vgg16(
     [512, 512, 14, 14],
     [512, 512, 14, 14],
 ]
-height_override_11 = [128, 128, 32, 32, 32, 32, 32, 32]
+height_override_11 = [None, None, None, 256, None, None, None, None]


 def ttnn_vgg11(
@@ -220,13 +221,14 @@ def ttnn_vgg11(
             deallocate_activation=False,
             input_channels_alignment=32,
             reallocate_halo_output=False,
-            act_block_h_override=height_override_11[iter_conv_id],
             transpose_shards=True,
             shard_layout=(
                 ttnn.TensorMemoryLayout.HEIGHT_SHARDED if h_sharding else ttnn.TensorMemoryLayout.BLOCK_SHARDED
             ),
             enable_weights_double_buffer=True,
         )
+        if height_override_11[iter_conv_id] is not None:
+            conv_config.act_block_h_override = height_override_11[iter_conv_id]

         tt_weight = parameters.features[conv_feature_ids_2[iter_conv_id]].weight
         tt_weight = ttnn.to_layout(ttnn.from_device(tt_weight), layout=ttnn.ROW_MAJOR_LAYOUT)
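Taken together with the test update, the implied Wormhole speedups from letting most convs auto-select their activation block height are roughly:

    # Ratios of new to old expected_perf (non-Grayskull branch of the test above).
    vgg11_speedup = 356 / 283.289   # ~1.26x
    vgg16_speedup = 276 / 201.3867  # ~1.37x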