forked from vllm-project/vllm
Commit
[torch.compile] integration with compilation control (vllm-project#9058)
Signed-off-by: Sumit Dubey <[email protected]>
1 parent c2d3cdf · commit 3ab8b1d
Showing 22 changed files with 404 additions and 98 deletions.
@@ -0,0 +1,48 @@ (new file)
from typing import Dict, List, Optional

import pytest

from vllm.compilation.levels import CompilationLevel
from vllm.utils import cuda_device_count_stateless

from ..utils import compare_all_settings


# we cannot afford testing the full Cartesian product
# of all models and all levels
@pytest.mark.parametrize(
    "model, model_args, pp_size, tp_size, attn_backend, method, fullgraph",
    [
        ("meta-llama/Meta-Llama-3-8B", [], 2, 2, "FLASH_ATTN", "generate",
         True),
        ("nm-testing/Meta-Llama-3-8B-Instruct-W8A8-Dyn-Per-Token-2048-Samples",
         ["--quantization", "compressed-tensors"
          ], 1, 1, "FLASH_ATTN", "generate", True),
        ("google/gemma-2-2b-it", [], 1, 2, "FLASHINFER", "generate", True),
        # TODO: add multi-modality test for llava
        ("llava-hf/llava-1.5-7b-hf", [], 2, 1, "FLASHINFER", "generate", False)
    ])
def test_compile_correctness(model, model_args, pp_size, tp_size, attn_backend,
                             method, fullgraph):
    # this test is run under multiple suites, with different GPUs.
    # make sure we only run the test with the correct CUDA devices.
    # don't use "<", as it would duplicate the tests.
    if cuda_device_count_stateless() != pp_size * tp_size:
        pytest.skip("Not correct CUDA devices for the test.")
    import os
    os.environ["VLLM_ATTENTION_BACKEND"] = attn_backend
    if not fullgraph:
        os.environ["VLLM_TEST_DYNAMO_FULLGRAPH_CAPTURE"] = "0"
    all_args = [["--enforce-eager"] + model_args + ["--max_model_len", "1024"]
                + ["-pp", str(pp_size)] + ["-tp", str(tp_size)]] * 3
    # don't test the VLLM_TORCH_COMPILE_LEVEL == 3 (inductor) case:
    # inductor will change the output, so we cannot compare the results.
    all_envs: List[Optional[Dict[str, str]]] = [{
        "VLLM_TORCH_COMPILE_LEVEL": str(level)
    } for level in [
        CompilationLevel.NO_COMPILATION,
        CompilationLevel.DYNAMO_AS_IS,
        CompilationLevel.DYNAMO_ONCE,
    ]]
    compare_all_settings(model, all_args, all_envs, method=method)
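For orientation, here is what a single one of the compared settings amounts to when run by hand, as a minimal single-GPU sketch (not part of the diff; the model choice, prompt, and sampling parameters are illustrative). It pins the compilation level through the same VLLM_TORCH_COMPILE_LEVEL environment variable that the test sweeps over:

# minimal sketch: run one "setting" by hand (assumes one free GPU).
# set the env var before constructing the engine so vLLM picks it up.
import os

from vllm.compilation.levels import CompilationLevel

os.environ["VLLM_TORCH_COMPILE_LEVEL"] = str(CompilationLevel.DYNAMO_ONCE)

from vllm import LLM, SamplingParams

llm = LLM(model="google/gemma-2-2b-it",
          enforce_eager=True,
          max_model_len=1024)
outputs = llm.generate(["The capital of France is"],
                       SamplingParams(temperature=0.0, max_tokens=16))
print(outputs[0].outputs[0].text)

compare_all_settings then runs the same engine once per entry in all_envs with otherwise-identical arguments and checks that the outputs agree, which is why the inductor level, which may legitimately change the numerics, is excluded from the sweep.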
@@ -1,13 +1,20 @@
 import pytest

-from vllm.compilation.backends import vllm_backend
+from vllm.compilation.levels import CompilationLevel

+from ..utils import fork_new_process_for_each_test
 from .utils import TEST_MODELS, check_full_graph_support


 @pytest.mark.parametrize("model_info", TEST_MODELS)
-@pytest.mark.parametrize("backend", ["eager", vllm_backend])
-def test_full_graph(model_info, backend):
+@pytest.mark.parametrize(
+    "optimization_level",
+    [CompilationLevel.DYNAMO_ONCE, CompilationLevel.INDUCTOR])
+@fork_new_process_for_each_test
+def test_full_graph(model_info, optimization_level):
     model = model_info[0]
     model_kwargs = model_info[1]
-    check_full_graph_support(model, model_kwargs, backend, tp_size=1)
+    check_full_graph_support(model,
+                             model_kwargs,
+                             optimization_level,
+                             tp_size=1)
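The newly applied @fork_new_process_for_each_test decorator (imported from ..utils) is what makes parametrizing over compilation levels safe here: the level is selected through process-global state, so each parametrization has to start from a clean process. A rough sketch of the idea behind such a decorator, not vLLM's actual implementation, assuming a POSIX system:

import os
import traceback
from functools import wraps


def fork_new_process_for_each_test(f):
    """Run the wrapped test in a forked child process (POSIX only), so
    that process-global state -- environment variables, the CUDA
    context, torch.compile caches -- cannot leak between
    parametrizations."""

    @wraps(f)
    def wrapper(*args, **kwargs):
        pid = os.fork()
        if pid == 0:
            # child: run the test body and report success or failure
            # through the exit code
            try:
                f(*args, **kwargs)
            except BaseException:
                traceback.print_exc()
                os._exit(1)
            os._exit(0)
        # parent: wait for the child and surface its failure
        _, status = os.waitpid(pid, 0)
        assert os.WEXITSTATUS(status) == 0, \
            f"{f.__name__} failed in the forked child process"

    return wrapper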
This file was deleted.
This file was deleted.
@@ -0,0 +1,23 @@ (new file)
from contextlib import contextmanager
from typing import Any

_compile_context: Any = None


def get_compile_context() -> Any:
    """Get the current compile context."""
    return _compile_context


@contextmanager
def set_compile_context(context: Any):
    """A context manager that stores the current compile context,
    which is usually a list of sizes to specialize.
    """
    global _compile_context
    prev_context = _compile_context
    _compile_context = context
    try:
        yield
    finally:
        _compile_context = prev_context
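A short usage example (illustrative; the size list is invented) showing the save/restore behavior that the try/finally above guarantees:

from vllm.compilation.compile_context import (get_compile_context,
                                              set_compile_context)

# e.g. the batch sizes the runner wants compiled code specialized for
sizes_to_specialize = [1, 2, 4, 8]

assert get_compile_context() is None
with set_compile_context(sizes_to_specialize):
    # code executed during compilation can look the context up
    assert get_compile_context() == [1, 2, 4, 8]
# on exit the previous context is restored, even if an exception is raised
assert get_compile_context() is None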