diff --git a/tests/scripts/single_card/run_single_card_demo_tests.sh b/tests/scripts/single_card/run_single_card_demo_tests.sh
index 73e4931084e..ee1df0e5567 100755
--- a/tests/scripts/single_card/run_single_card_demo_tests.sh
+++ b/tests/scripts/single_card/run_single_card_demo_tests.sh
@@ -42,21 +42,6 @@ run_common_perf_tests(){
   # Skipping: kills WH cards, check issue #14440
   # WH_ARCH_YAML=wormhole_b0_80_arch_eth_dispatch.yaml pytest -n auto models/demos/wormhole/mistral7b/demo/demo_with_prefill.py --timeout 420; fail+=$?
 
-  # Llama3.1-8B
-  llama8b=/mnt/MLPerf/tt_dnn-models/llama/Meta-Llama-3.1-8B-Instruct/
-  # Llama3.2-1B
-  llama1b=/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-1B-Instruct/
-  # Llama3.2-3B
-  llama3b=/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-3B-Instruct/
-  # Llama3.2-11B
-  llama11b=/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-11B-Vision-Instruct/
-
-  # Run all Llama3 tests for 8B, 1B, and 3B weights
-  for llama_dir in "$llama1b" "$llama3b" "$llama8b" "$llama11b"; do
-    LLAMA_DIR=$llama_dir WH_ARCH_YAML=wormhole_b0_80_arch_eth_dispatch.yaml pytest -n auto models/demos/llama3/demo/demo.py --timeout 600; fail+=$?
-    echo "LOG_METAL: Llama3 tests for $llama_dir completed"
-  done
-
   # Mamba
   WH_ARCH_YAML=wormhole_b0_80_arch_eth_dispatch.yaml pytest -n auto --disable-warnings -q -s --input-method=json --input-path='models/demos/wormhole/mamba/demo/prompts.json' models/demos/wormhole/mamba/demo/demo.py --timeout 420; fail+=$?
 
@@ -91,6 +76,27 @@ run_n300_perf_tests(){
 
   run_common_perf_tests; fail+=$?
 
+  # Llama3.1-8B
+  llama8b=/mnt/MLPerf/tt_dnn-models/llama/Meta-Llama-3.1-8B-Instruct/
+  # Llama3.2-1B
+  llama1b=/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-1B-Instruct/
+  # Llama3.2-3B
+  llama3b=/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-3B-Instruct/
+  # Llama3.2-11B
+  llama11b=/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-11B-Vision-Instruct/
+
+  # Run all Llama3 tests for 1B, 3B, 8B and 11B weights for N150
+  # To ensure a proper perf measurement and dashboard upload of the Llama3 models on a N150, we have to run them on the N300 perf pipeline for now
+  for llama_dir in "$llama1b" "$llama3b" "$llama8b" "$llama11b"; do
+    FAKE_DEVICE=N150 LLAMA_DIR=$llama_dir WH_ARCH_YAML=wormhole_b0_80_arch_eth_dispatch.yaml pytest -n auto models/demos/llama3/demo/demo.py --timeout 600; fail+=$?
+    echo "LOG_METAL: Llama3 tests for $llama_dir completed on N150"
+  done
+  # Run all Llama3 tests for 1B, 3B, 8B and 11B weights
+  for llama_dir in "$llama1b" "$llama3b" "$llama8b" "$llama11b"; do
+    LLAMA_DIR=$llama_dir WH_ARCH_YAML=wormhole_b0_80_arch_eth_dispatch.yaml pytest -n auto models/demos/llama3/demo/demo.py --timeout 600; fail+=$?
+    echo "LOG_METAL: Llama3 tests for $llama_dir completed"
+  done
+
   # Falcon7b (perf verification for 128/1024/2048 seq lens and output token verification)
   WH_ARCH_YAML=wormhole_b0_80_arch_eth_dispatch.yaml pytest -n auto --disable-warnings -q -s --input-method=json --input-path='models/demos/falcon7b_common/demo/input_data.json' models/demos/wormhole/falcon7b/demo_wormhole.py; fail+=$?