From b4eec340871e6e31c7d8a879275157ae16a6beaa Mon Sep 17 00:00:00 2001
From: Robert Shaw
Date: Mon, 1 Jul 2024 13:12:02 +0000
Subject: [PATCH] format

---
 tests/distributed/test_multimodal_broadcast.py     | 1 -
 tests/distributed/test_parallel_state.py           | 4 ++--
 tests/models/test_compressed_tensors.py            | 3 +--
 tests/spec_decode/e2e/test_integration_dist_tp2.py | 3 +--
 tests/tokenization/test_get_eos.py                 | 2 +-
 tests/worker/test_model_input.py                   | 1 +
 6 files changed, 6 insertions(+), 8 deletions(-)

diff --git a/tests/distributed/test_multimodal_broadcast.py b/tests/distributed/test_multimodal_broadcast.py
index 596e82814f09b..18423c3052f54 100644
--- a/tests/distributed/test_multimodal_broadcast.py
+++ b/tests/distributed/test_multimodal_broadcast.py
@@ -22,7 +22,6 @@
     pytest.skip("TEST_DISTRIBUTED=DISABLE, skipping distributed test group",
                 allow_module_level=True)
 
-
 model = os.environ["TEST_DIST_MODEL"]
 
 if model.startswith("llava-hf/llava"):
diff --git a/tests/distributed/test_parallel_state.py b/tests/distributed/test_parallel_state.py
index 2e8a72e2dd1ba..ac4cd52ba9fe3 100644
--- a/tests/distributed/test_parallel_state.py
+++ b/tests/distributed/test_parallel_state.py
@@ -3,15 +3,15 @@
 import pytest
 import torch
 
+from tests.nm_utils.utils_skip import should_skip_test_group
 from vllm.distributed.parallel_state import (_split_tensor_dict,
                                             _update_nested_dict)
 
-from tests.nm_utils.utils_skip import should_skip_test_group
-
 if should_skip_test_group(group_name="TEST_DISTRIBUTED"):
     pytest.skip("TEST_DISTRIBUTED=DISABLE, skipping distributed test group",
                 allow_module_level=True)
 
+
 def test_split_tensor_dict():
     test_dict = {
         "key_a": "a",
diff --git a/tests/models/test_compressed_tensors.py b/tests/models/test_compressed_tensors.py
index 0a42e9773afa1..baaab38e5e008 100644
--- a/tests/models/test_compressed_tensors.py
+++ b/tests/models/test_compressed_tensors.py
@@ -7,9 +7,8 @@
 
 import pytest
 
-from tests.quantization.utils import is_quant_method_supported
-
 from tests.nm_utils.utils_skip import should_skip_test_group
+from tests.quantization.utils import is_quant_method_supported
 
 from .utils import check_logprobs_close
diff --git a/tests/spec_decode/e2e/test_integration_dist_tp2.py b/tests/spec_decode/e2e/test_integration_dist_tp2.py
index d858a3464cb08..86fd88d57967c 100644
--- a/tests/spec_decode/e2e/test_integration_dist_tp2.py
+++ b/tests/spec_decode/e2e/test_integration_dist_tp2.py
@@ -5,9 +5,8 @@
 import pytest
 import torch
 
-from vllm.utils import is_hip
-
 from tests.nm_utils.utils_skip import should_skip_test_group
+from vllm.utils import is_hip
 
 from .conftest import run_greedy_equality_correctness_test
diff --git a/tests/tokenization/test_get_eos.py b/tests/tokenization/test_get_eos.py
index 766901d6a949d..8da4b0d9b99f9 100644
--- a/tests/tokenization/test_get_eos.py
+++ b/tests/tokenization/test_get_eos.py
@@ -9,11 +9,11 @@
 from vllm.transformers_utils.config import try_get_generation_config
 from vllm.transformers_utils.tokenizer import get_tokenizer
 
-
 if should_skip_test_group(group_name="TEST_TOKENIZATION"):
     pytest.skip("TEST_TOKENIZATION=DISABLE, skipping tokenization test group",
                 allow_module_level=True)
 
+
 def test_get_llama3_eos_token():
     model_name = "meta-llama/Meta-Llama-3-8B-Instruct"
diff --git a/tests/worker/test_model_input.py b/tests/worker/test_model_input.py
index 4f121296cd0c5..c1cbbd33b5c3b 100644
--- a/tests/worker/test_model_input.py
+++ b/tests/worker/test_model_input.py
@@ -17,6 +17,7 @@
     pytest.skip("TEST_WORKER=DISABLE, skipping worker test group",
                 allow_module_level=True)
 
+
 class MockAttentionBackend(AttentionBackend):
 
     @staticmethod