#14393: Use python3 #15867

Merged 1 commit, Dec 10, 2024
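This PR replaces `python` with `python3` in the shebang of models/perf/perf_report.py and in the `env python` invocations across the perf and CI test scripts. As context (my note, not text from the PR): on PEP 394-style systems a bare `python` may be missing or may point at Python 2, while `python3` reliably selects a Python 3 interpreter when one is installed. A minimal, illustrative shell check, assuming a POSIX shell:

    # Illustrative only -- not part of the diff.
    command -v python  || echo "no 'python' on PATH"   # can fail on modern distros
    command -v python3 && python3 --version            # resolves wherever Python 3 is installed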
models/perf/perf_report.py (2 changes: 1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 # SPDX-FileCopyrightText: © 2023 Tenstorrent Inc.

 # SPDX-License-Identifier: Apache-2.0
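A brief aside on the shebang change (intent inferred, not stated in the PR): with `#!/usr/bin/env python3`, running the file directly resolves `python3` from PATH instead of requiring a `python` alias to exist. A hedged usage sketch:

    chmod +x models/perf/perf_report.py    # assuming the script is meant to be run directly
    ./models/perf/perf_report.py           # env picks up python3 from PATH
    python3 models/perf/perf_report.py     # equivalent explicit invocation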
tests/scripts/run_cpp_unit_tests.sh (8 changes: 4 additions & 4 deletions)

@@ -22,16 +22,16 @@ rm -rf $kernel_path
 ./build/test/tt_metal/distributed/distributed_unit_tests --gtest_filter=MeshDeviceSuite.*

 if [[ ! -z "$TT_METAL_SLOW_DISPATCH_MODE" ]]; then
-    env python tests/scripts/run_tt_metal.py --dispatch-mode slow
-    env python tests/scripts/run_tt_eager.py --dispatch-mode slow
+    env python3 tests/scripts/run_tt_metal.py --dispatch-mode slow
+    env python3 tests/scripts/run_tt_eager.py --dispatch-mode slow
 else
     TT_METAL_GTEST_NUM_HW_CQS=2 ./build/test/tt_metal/unit_tests_dispatch --gtest_filter=MultiCommandQueue*Fixture.*
     # Enable this on BH after #14613
     if [[ "$ARCH_NAME" == "wormhole_b0" ]]; then
         TT_METAL_GTEST_ETH_DISPATCH=1 ./build/test/tt_metal/unit_tests_dispatch
     fi
-    env python tests/scripts/run_tt_eager.py --dispatch-mode fast
-    env python tests/scripts/run_tt_metal.py --dispatch-mode fast
+    env python3 tests/scripts/run_tt_eager.py --dispatch-mode fast
+    env python3 tests/scripts/run_tt_metal.py --dispatch-mode fast
 fi

 # Tool tests use C++ unit tests so include them here.
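The same `env python` → `env python3` substitution repeats in the perf scripts below. As a reminder of standard `env` semantics (unchanged by this PR): `env CMD` resolves CMD from PATH, and `env VAR=value CMD` additionally sets VAR for that single invocation, which is how these scripts pass WH_ARCH_YAML:

    env python3 tests/scripts/run_tt_metal.py --dispatch-mode slow
    env WH_ARCH_YAML=wormhole_b0_80_arch_eth_dispatch.yaml pytest -n auto models/demos/wormhole/stable_diffusion/tests -m $test_marker --timeout=480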
tests/scripts/run_performance.sh (8 changes: 4 additions & 4 deletions)

@@ -44,7 +44,7 @@ run_perf_models_other() {
     env pytest -n auto models/demos/squeezebert/tests/test_performance.py -m $test_marker

     ## Merge all the generated reports
-    env python models/perf/merge_perf_results.py
+    env python3 models/perf/merge_perf_results.py
 }

 run_perf_models_llm_javelin() {
@@ -64,7 +64,7 @@ run_perf_models_llm_javelin() {
         env pytest -n auto models/demos/wormhole/mamba/tests -m $test_marker
     fi
     ## Merge all the generated reports
-    env python models/perf/merge_perf_results.py
+    env python3 models/perf/merge_perf_results.py
 }

 run_perf_models_cnn_javelin() {
@@ -76,7 +76,7 @@ run_perf_models_cnn_javelin() {
     env WH_ARCH_YAML=wormhole_b0_80_arch_eth_dispatch.yaml pytest -n auto models/demos/wormhole/stable_diffusion/tests -m $test_marker --timeout=480

     ## Merge all the generated reports
-    env python models/perf/merge_perf_results.py
+    env python3 models/perf/merge_perf_results.py
 }

 run_device_perf_models() {
@@ -128,7 +128,7 @@ run_device_perf_models() {
     fi

     ## Merge all the generated reports
-    env python models/perf/merge_device_perf_results.py
+    env python3 models/perf/merge_device_perf_results.py
 }

 run_device_perf_ops() {
tests/scripts/t3000/run_t3000_model_perf_tests.sh (4 changes: 2 additions & 2 deletions)

@@ -144,15 +144,15 @@ run_t3000_llm_tests() {
     run_t3000_falcon40b_tests

     # Merge all the generated reports
-    env python models/perf/merge_perf_results.py
+    env python3 models/perf/merge_perf_results.py
 }

 run_t3000_cnn_tests() {
     # Run resnet50 tests
     run_t3000_resnet50_tests

     # Merge all the generated reports
-    env python models/perf/merge_perf_results.py
+    env python3 models/perf/merge_perf_results.py
 }

 run_t3000_ccl_tests() {
tests/scripts/tg/run_tg_model_perf_tests.sh (4 changes: 2 additions & 2 deletions)

@@ -7,7 +7,7 @@ run_tg_llm_tests() {
     pytest -n auto models/demos/t3000/llama2_70b/tests/test_llama_perf_decode.py -m "model_perf_tg" --timeout=600 ; fail+=$?

     # Merge all the generated reports
-    env python models/perf/merge_perf_results.py; fail+=$?
+    env python3 models/perf/merge_perf_results.py; fail+=$?

     if [[ $fail -ne 0 ]]; then
         echo "LOG_METAL: run_tg_model_perf_tests failed"
@@ -21,7 +21,7 @@ run_tg_cnn_tests() {
     env pytest -n auto models/demos/tg/resnet50/tests/test_perf_e2e_resnet50.py -m "model_perf_tg" ; fail+=$?

     # Merge all the generated reports
-    env python models/perf/merge_perf_results.py; fail+=$?
+    env python3 models/perf/merge_perf_results.py; fail+=$?

     if [[ $fail -ne 0 ]]; then
         echo "LOG_METAL: run_tg_model_perf_tests failed"
tests/scripts/tgg/run_tgg_model_perf_tests.sh (4 changes: 2 additions & 2 deletions)

@@ -3,7 +3,7 @@ set -eo pipefail

 run_tgg_llm_tests() {
     # Merge all the generated reports
-    env python models/perf/merge_perf_results.py
+    env python3 models/perf/merge_perf_results.py
 }

 run_tgg_cnn_tests() {
@@ -12,7 +12,7 @@ run_tgg_cnn_tests() {
     env pytest -n auto models/demos/tgg/resnet50/tests/test_perf_e2e_resnet50.py -m "model_perf_tgg" --timeout=900 ; fail+=$?

     # Merge all the generated reports
-    env python models/perf/merge_perf_results.py
+    env python3 models/perf/merge_perf_results.py
 }

 main() {