diff --git a/docs/source/using-the-python-api.mdx b/docs/source/using-the-python-api.mdx
index 2e160a67..8c44050f 100644
--- a/docs/source/using-the-python-api.mdx
+++ b/docs/source/using-the-python-api.mdx
@@ -11,7 +11,7 @@ After that, simply run the pipeline and save the results.
 ```python
 import lighteval
 from lighteval.logging.evaluation_tracker import EvaluationTracker
-from lighteval.models.model_config import VLLMModelConfig
+from lighteval.models.vllm.vllm_model import VLLMModelConfig
 from lighteval.pipeline import ParallelismManager, Pipeline, PipelineParameters
 from lighteval.utils.utils import EnvConfig
 from lighteval.utils.imports import is_accelerate_available
diff --git a/src/lighteval/main_endpoint.py b/src/lighteval/main_endpoint.py
index 952aae07..be75b711 100644
--- a/src/lighteval/main_endpoint.py
+++ b/src/lighteval/main_endpoint.py
@@ -93,7 +93,7 @@ def openai(
     Evaluate OPENAI models.
     """
     from lighteval.logging.evaluation_tracker import EvaluationTracker
-    from lighteval.models.model_config import OpenAIModelConfig
+    from lighteval.models.endpoints.openai_model import OpenAIModelConfig
     from lighteval.pipeline import EnvConfig, ParallelismManager, Pipeline, PipelineParameters
 
     env_config = EnvConfig(token=TOKEN, cache_dir=cache_dir)
@@ -317,7 +317,7 @@ def tgi(
     import yaml
 
     from lighteval.logging.evaluation_tracker import EvaluationTracker
-    from lighteval.models.model_config import TGIModelConfig
+    from lighteval.models.endpoints.tgi_model import TGIModelConfig
     from lighteval.pipeline import EnvConfig, ParallelismManager, Pipeline, PipelineParameters
 
     env_config = EnvConfig(token=TOKEN, cache_dir=cache_dir)
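
For reference, a minimal sketch of a caller using the relocated import path, modeled on the docs snippet this diff touches. The model name, task string, and `ParallelismManager.VLLM` launcher choice are illustrative assumptions, not part of the diff:

```python
# Sketch only: model name, task spec, and launcher below are assumed placeholders.
from lighteval.logging.evaluation_tracker import EvaluationTracker
from lighteval.models.vllm.vllm_model import VLLMModelConfig  # new import location
from lighteval.pipeline import ParallelismManager, Pipeline, PipelineParameters
from lighteval.utils.utils import EnvConfig

# Track results locally; see EvaluationTracker for Hub-push options.
evaluation_tracker = EvaluationTracker(output_dir="./results")

pipeline_params = PipelineParameters(
    launcher_type=ParallelismManager.VLLM,  # assumed launcher for a vLLM backend
    env_config=EnvConfig(cache_dir="tmp/"),
)

model_config = VLLMModelConfig(pretrained="HuggingFaceH4/zephyr-7b-beta")

pipeline = Pipeline(
    tasks="leaderboard|truthfulqa:mc|0|0",  # illustrative "suite|task|fewshot|truncate" spec
    pipeline_parameters=pipeline_params,
    evaluation_tracker=evaluation_tracker,
    model_config=model_config,
)

pipeline.evaluate()
pipeline.save_and_push_results()
```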