From d679e662d8bfbed61d4e249b2d5095968187bf7a Mon Sep 17 00:00:00 2001
From: Andrew Fuller
Date: Tue, 10 Dec 2024 14:42:51 -0500
Subject: [PATCH] #14393: Use python3 (#15867)

### Ticket
Progress towards https://github.com/tenstorrent/tt-metal/issues/14393

### Problem description
Not all systems can be assumed to have `python`. Call out `python3` as that's what we need.

### What's changed
`env python` -> `env python3`
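As a quick illustration (not part of the patch): on systems that ship only `python3` with no bare `python` on PATH, the old invocations fail to resolve at all. Below is a minimal sketch of the kind of up-front guard a caller could add, assuming a Bash-compatible shell where `command -v` and `env` are available; the diff itself only switches the existing `env python` calls and the `perf_report.py` shebang to `python3`.

```bash
#!/usr/bin/env bash
# Illustrative sketch only -- not part of this change.
# Fail fast if python3 is missing from PATH, since the test scripts now invoke it directly.
if ! command -v python3 >/dev/null 2>&1; then
    echo "python3 is required but was not found on PATH" >&2
    exit 1
fi

# With python3 present, invocations like those in the patched scripts resolve via PATH:
env python3 --version
```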
### Checklist
- [x] Post commit CI passes https://github.com/tenstorrent/tt-metal/actions/runs/12260302459
- [ ] Blackhole Post commit (if applicable)
- [ ] Model regression CI testing passes (if applicable)
- [ ] Device performance regression CI testing passes (if applicable)
- [ ] **(For models and ops writers)** Full [new models](https://github.com/tenstorrent/tt-metal/actions/workflows/full-new-models-suite.yaml) tests passes
- [ ] New/Existing tests provide coverage for changes
---
 models/perf/perf_report.py                        | 2 +-
 tests/scripts/run_cpp_unit_tests.sh               | 8 ++++----
 tests/scripts/run_performance.sh                  | 8 ++++----
 tests/scripts/t3000/run_t3000_model_perf_tests.sh | 4 ++--
 tests/scripts/tg/run_tg_model_perf_tests.sh       | 4 ++--
 tests/scripts/tgg/run_tgg_model_perf_tests.sh     | 4 ++--
 6 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/models/perf/perf_report.py b/models/perf/perf_report.py
index 379c27c0ce4..13a20e3db52 100755
--- a/models/perf/perf_report.py
+++ b/models/perf/perf_report.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 
 # SPDX-FileCopyrightText: © 2023 Tenstorrent Inc.
 # SPDX-License-Identifier: Apache-2.0
diff --git a/tests/scripts/run_cpp_unit_tests.sh b/tests/scripts/run_cpp_unit_tests.sh
index 8179168ac79..ad74b3333aa 100755
--- a/tests/scripts/run_cpp_unit_tests.sh
+++ b/tests/scripts/run_cpp_unit_tests.sh
@@ -22,16 +22,16 @@ rm -rf $kernel_path
 ./build/test/tt_metal/distributed/distributed_unit_tests --gtest_filter=MeshDeviceSuite.*
 
 if [[ ! -z "$TT_METAL_SLOW_DISPATCH_MODE" ]]; then
-    env python tests/scripts/run_tt_metal.py --dispatch-mode slow
-    env python tests/scripts/run_tt_eager.py --dispatch-mode slow
+    env python3 tests/scripts/run_tt_metal.py --dispatch-mode slow
+    env python3 tests/scripts/run_tt_eager.py --dispatch-mode slow
 else
     TT_METAL_GTEST_NUM_HW_CQS=2 ./build/test/tt_metal/unit_tests_dispatch --gtest_filter=MultiCommandQueue*Fixture.*
     # Enable this on BH after #14613
     if [[ "$ARCH_NAME" == "wormhole_b0" ]]; then
         TT_METAL_GTEST_ETH_DISPATCH=1 ./build/test/tt_metal/unit_tests_dispatch
     fi
-    env python tests/scripts/run_tt_eager.py --dispatch-mode fast
-    env python tests/scripts/run_tt_metal.py --dispatch-mode fast
+    env python3 tests/scripts/run_tt_eager.py --dispatch-mode fast
+    env python3 tests/scripts/run_tt_metal.py --dispatch-mode fast
 fi
 
 # Tool tests use C++ unit tests so include them here.
diff --git a/tests/scripts/run_performance.sh b/tests/scripts/run_performance.sh
index 7c42512474d..514bf13510c 100755
--- a/tests/scripts/run_performance.sh
+++ b/tests/scripts/run_performance.sh
@@ -44,7 +44,7 @@ run_perf_models_other() {
     env pytest -n auto models/demos/squeezebert/tests/test_performance.py -m $test_marker
 
     ## Merge all the generated reports
-    env python models/perf/merge_perf_results.py
+    env python3 models/perf/merge_perf_results.py
 }
 
 run_perf_models_llm_javelin() {
@@ -64,7 +64,7 @@ run_perf_models_llm_javelin() {
         env pytest -n auto models/demos/wormhole/mamba/tests -m $test_marker
     fi
     ## Merge all the generated reports
-    env python models/perf/merge_perf_results.py
+    env python3 models/perf/merge_perf_results.py
 }
 
 run_perf_models_cnn_javelin() {
@@ -76,7 +76,7 @@ run_perf_models_cnn_javelin() {
     env WH_ARCH_YAML=wormhole_b0_80_arch_eth_dispatch.yaml pytest -n auto models/demos/wormhole/stable_diffusion/tests -m $test_marker --timeout=480
 
     ## Merge all the generated reports
-    env python models/perf/merge_perf_results.py
+    env python3 models/perf/merge_perf_results.py
 }
 
 run_device_perf_models() {
@@ -128,7 +128,7 @@ run_device_perf_models() {
     fi
 
     ## Merge all the generated reports
-    env python models/perf/merge_device_perf_results.py
+    env python3 models/perf/merge_device_perf_results.py
 }
 
 run_device_perf_ops() {
diff --git a/tests/scripts/t3000/run_t3000_model_perf_tests.sh b/tests/scripts/t3000/run_t3000_model_perf_tests.sh
index eff50354e04..0b51812929a 100755
--- a/tests/scripts/t3000/run_t3000_model_perf_tests.sh
+++ b/tests/scripts/t3000/run_t3000_model_perf_tests.sh
@@ -144,7 +144,7 @@ run_t3000_llm_tests() {
     run_t3000_falcon40b_tests
 
     # Merge all the generated reports
-    env python models/perf/merge_perf_results.py
+    env python3 models/perf/merge_perf_results.py
 }
 
 run_t3000_cnn_tests() {
@@ -152,7 +152,7 @@
     run_t3000_resnet50_tests
 
     # Merge all the generated reports
-    env python models/perf/merge_perf_results.py
+    env python3 models/perf/merge_perf_results.py
 }
 
 run_t3000_ccl_tests() {
diff --git a/tests/scripts/tg/run_tg_model_perf_tests.sh b/tests/scripts/tg/run_tg_model_perf_tests.sh
index d86a7a96688..cf8f9f14c6e 100755
--- a/tests/scripts/tg/run_tg_model_perf_tests.sh
+++ b/tests/scripts/tg/run_tg_model_perf_tests.sh
@@ -7,7 +7,7 @@ run_tg_llm_tests() {
     pytest -n auto models/demos/t3000/llama2_70b/tests/test_llama_perf_decode.py -m "model_perf_tg" --timeout=600 ; fail+=$?
 
     # Merge all the generated reports
-    env python models/perf/merge_perf_results.py; fail+=$?
+    env python3 models/perf/merge_perf_results.py; fail+=$?
 
     if [[ $fail -ne 0 ]]; then
         echo "LOG_METAL: run_tg_model_perf_tests failed"
@@ -21,7 +21,7 @@ run_tg_cnn_tests() {
     env pytest -n auto models/demos/tg/resnet50/tests/test_perf_e2e_resnet50.py -m "model_perf_tg" ; fail+=$?
 
     # Merge all the generated reports
-    env python models/perf/merge_perf_results.py; fail+=$?
+    env python3 models/perf/merge_perf_results.py; fail+=$?
 
     if [[ $fail -ne 0 ]]; then
         echo "LOG_METAL: run_tg_model_perf_tests failed"
diff --git a/tests/scripts/tgg/run_tgg_model_perf_tests.sh b/tests/scripts/tgg/run_tgg_model_perf_tests.sh
index c75b5ea6cab..56d43955f02 100755
--- a/tests/scripts/tgg/run_tgg_model_perf_tests.sh
+++ b/tests/scripts/tgg/run_tgg_model_perf_tests.sh
@@ -3,7 +3,7 @@ set -eo pipefail
 
 run_tgg_llm_tests() {
     # Merge all the generated reports
-    env python models/perf/merge_perf_results.py
+    env python3 models/perf/merge_perf_results.py
 }
 
 run_tgg_cnn_tests() {
@@ -12,7 +12,7 @@ run_tgg_cnn_tests() {
     env pytest -n auto models/demos/tgg/resnet50/tests/test_perf_e2e_resnet50.py -m "model_perf_tgg" --timeout=900 ; fail+=$?
 
     # Merge all the generated reports
-    env python models/perf/merge_perf_results.py
+    env python3 models/perf/merge_perf_results.py
 }
 
 main() {