From 51c06d660bb6273947bb9520999263c0666306bc Mon Sep 17 00:00:00 2001
From: Sadra Barikbin
Date: Tue, 7 May 2024 22:41:15 +0330
Subject: [PATCH] Fix a tiny typo & a tiny bug

---
 src/lighteval/logging/evaluation_tracker.py | 2 +-
 src/lighteval/models/model_config.py        | 8 ++++----
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/lighteval/logging/evaluation_tracker.py b/src/lighteval/logging/evaluation_tracker.py
index 35a835bc1..f4bdf9566 100644
--- a/src/lighteval/logging/evaluation_tracker.py
+++ b/src/lighteval/logging/evaluation_tracker.py
@@ -511,7 +511,7 @@ def push_results_to_tensorboard(  # noqa: C901
         self, results: dict[str, dict[str, float]], details: dict[str, DetailsLogger.CompiledDetail]
     ):
         if not is_nanotron_available():
-            hlog_warn("You cannot push results to tensorboard with having nanotron installed. Skipping")
+            hlog_warn("You cannot push results to tensorboard without having nanotron installed. Skipping")
             return
         config: Config = self.general_config_logger.config
         lighteval_config = config.lighteval
diff --git a/src/lighteval/models/model_config.py b/src/lighteval/models/model_config.py
index d62a85d3c..ee76f4524 100644
--- a/src/lighteval/models/model_config.py
+++ b/src/lighteval/models/model_config.py
@@ -85,9 +85,9 @@ class BaseModelConfig:
             If `None`, the default value will be set to `True` for seq2seq models (e.g. T5) and
                 `False` for causal models.
         model_parallel (bool, optional, defaults to False):
-            True/False: force to uses or not the `accelerate` library to load a large
+            True/False: force to use or not the `accelerate` library to load a large
                 model across multiple devices.
-            Default: None which correspond to comparing the number of process with
+            Default: None which corresponds to comparing the number of processes with
                 the number of GPUs. If it's smaller => model-parallelism, else not.
         dtype (Union[str, torch.dtype], optional, defaults to None):):
             Converts the model weights to `dtype`, if specified. Strings get
@@ -279,8 +279,8 @@ def create_model_config(args: Namespace, accelerator: Union["Accelerator", None]
 
     if config["type"] == "tgi":
         return TGIModelConfig(
-            inference_server_address=args["instance"]["inference_server_address"],
-            inference_server_auth=args["instance"]["inference_server_auth"],
+            inference_server_address=config["instance"]["inference_server_address"],
+            inference_server_auth=config["instance"]["inference_server_auth"],
         )
 
     if config["type"] == "endpoint":